/*
 * Host Controller Driver for the Elan Digital Systems U132 adapter
 *
 * Copyright(C) 2006 Elan Digital Systems Limited
 * http://www.elandigitalsystems.com
 *
 * Author and Maintainer - Tony Olech - Elan Digital Systems
 * tony.olech@elandigitalsystems.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver was written by Tony Olech (tony.olech@elandigitalsystems.com)
 * based on various USB host drivers in the 2.6.15 linux kernel
 * with constant reference to the 3rd Edition of Linux Device Drivers
 * published by O'Reilly.
 *
 * The U132 adapter is a USB-to-CardBus adapter specifically designed
 * for PC cards that contain an OHCI host controller. A typical PC card
 * is the Orange Mobile 3G Option GlobeTrotter Fusion card.
 *
 * The U132 adapter will *NOT* work with PC cards that do not contain
 * an OHCI controller. A simple way to test whether a PC card has an
 * OHCI controller as an interface is to insert the PC card directly
 * into a laptop (or desktop) with a CardBus slot: if "lspci" shows
 * a new USB controller and "lsusb -v" shows a new OHCI Host Controller,
 * then there is a good chance that the U132 adapter will support the
 * PC card. (You also need the specific client driver for the PC card.)
 *
 * Please inform the Author and Maintainer about any PC cards that
 * contain an OHCI Host Controller and work when directly connected to
 * an embedded CardBus slot but do not work when they are connected
 * via an ELAN U132 adapter.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/pci_ids.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <asm/byteorder.h>
/* FIXME ohci.h is ONLY for internal use by the OHCI driver.
 * If you're going to try stuff like this, you need to split
 * out shareable stuff (register declarations?) into its own
 * file, maybe name <linux/usb/ohci.h>
 */
#include "ohci.h"
#define OHCI_CONTROL_INIT OHCI_CTRL_CBSR
#define OHCI_INTR_INIT (OHCI_INTR_MIE | OHCI_INTR_UE | OHCI_INTR_RD | \
		OHCI_INTR_WDH)
MODULE_AUTHOR("Tony Olech - Elan Digital Systems Limited");
MODULE_DESCRIPTION("U132 USB Host Controller Driver");
MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent */
static bool distrust_firmware = true;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware,
	"true to distrust firmware power/overcurrent setup");
static DECLARE_WAIT_QUEUE_HEAD(u132_hcd_wait);
/*
 * u132_module_lock exists to protect access to global variables
 */
static struct mutex u132_module_lock;
static int u132_exiting;
static int u132_instances;
static struct list_head u132_static_list;
/*
 * end of the global variables protected by u132_module_lock
 */
static struct workqueue_struct *workqueue;
#define MAX_U132_PORTS 7
#define MAX_U132_ADDRS 128
#define MAX_U132_UDEVS 4
#define MAX_U132_ENDPS 100
#define MAX_U132_RINGS 4
static const char *cc_to_text[16] = {
	struct usb_device *usb_device;
	u8 endp_number_in[16];
	u8 endp_number_out[16];
#define ENDP_QUEUE_SHIFT 3
#define ENDP_QUEUE_SIZE (1<<ENDP_QUEUE_SHIFT)
#define ENDP_QUEUE_MASK (ENDP_QUEUE_SIZE-1)
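/*
 * The per-endpoint URB queue is a power-of-two circular buffer:
 * urb_list[] holds ENDP_QUEUE_SIZE (1 << 3 = 8) slots, and the
 * queue_next/queue_last counters are reduced with ENDP_QUEUE_MASK,
 * e.g. urb_list[ENDP_QUEUE_MASK & endp->queue_next].  URBs submitted
 * while all eight slots are busy spill onto the endpoint's urb_more
 * list until slots free up again.
 */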
	struct list_head urb_more;
	struct list_head endp_ring;
	struct u132_ring *ring;
	unsigned toggle_bits:2;
	unsigned dequeueing:1;
	unsigned edset_flush:1;
	unsigned spare_bits:14;
	unsigned long jiffies;
	struct usb_host_endpoint *hep;
	struct u132_spin queue_lock;
	struct urb *urb_list[ENDP_QUEUE_SIZE];
	struct list_head urb_more;
	struct delayed_work scheduler;
	struct u132_endp *curr_endp;
	struct delayed_work scheduler;
	struct list_head u132_list;
	struct mutex sw_lock;
	struct mutex scheduler_lock;
	struct u132_platform_data *board;
	struct platform_device *platform_dev;
	struct u132_ring ring[MAX_U132_RINGS];
	u32 hc_roothub_status;
	u32 hc_roothub_portstatus[MAX_ROOT_PORTS];
	unsigned long next_statechange;
	struct delayed_work monitor;
	struct u132_addr addr[MAX_U132_ADDRS];
	struct u132_udev udev[MAX_U132_UDEVS];
	struct u132_port port[MAX_U132_PORTS];
	struct u132_endp *endp[MAX_U132_ENDPS];
/*
 * these cannot be inlines because we need the structure offset!!
 * Does anyone have a better way?????
 */
#define ftdi_read_pcimem(pdev, member, data) usb_ftdi_elan_read_pcimem(pdev, \
	offsetof(struct ohci_regs, member), 0, data);
#define ftdi_write_pcimem(pdev, member, data) usb_ftdi_elan_write_pcimem(pdev, \
	offsetof(struct ohci_regs, member), 0, data);
#define u132_read_pcimem(u132, member, data) \
	usb_ftdi_elan_read_pcimem(u132->platform_dev, offsetof(struct \
	ohci_regs, member), 0, data);
#define u132_write_pcimem(u132, member, data) \
	usb_ftdi_elan_write_pcimem(u132->platform_dev, offsetof(struct \
	ohci_regs, member), 0, data);
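/*
 * The *_pcimem() macros above never touch memory-mapped registers
 * directly: every OHCI register access is forwarded as a request to the
 * usb_ftdi_elan_{read,write}_pcimem() helpers exported by the companion
 * ftdi-elan driver, with offsetof(struct ohci_regs, member) selecting
 * the register inside the PC card's OHCI register file.
 */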
static inline struct u132 *udev_to_u132(struct u132_udev *udev)
{
	u8 udev_number = udev->udev_number;
	return container_of(udev, struct u132, udev[udev_number]);
}

static inline struct u132 *hcd_to_u132(struct usb_hcd *hcd)
{
	return (struct u132 *)(hcd->hcd_priv);
}

static inline struct usb_hcd *u132_to_hcd(struct u132 *u132)
{
	return container_of((void *)u132, struct usb_hcd, hcd_priv);
}

static inline void u132_disable(struct u132 *u132)
{
	u132_to_hcd(u132)->state = HC_STATE_HALT;
}

#define kref_to_u132(d) container_of(d, struct u132, kref)
#define kref_to_u132_endp(d) container_of(d, struct u132_endp, kref)
#define kref_to_u132_udev(d) container_of(d, struct u132_udev, kref)
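/*
 * The kref_to_*() macros recover the containing object from the struct
 * kref embedded in it, so the kref release callbacks (u132_hcd_delete,
 * u132_udev_delete, u132_endp_delete) can tear down the right structure
 * when its last reference is dropped.
 */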
#include "../misc/usb_u132.h"
static const char hcd_name[] = "u132_hcd";
#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE | \
	USB_PORT_STAT_C_SUSPEND | USB_PORT_STAT_C_OVERCURRENT | \
	USB_PORT_STAT_C_RESET) << 16)
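/*
 * PORT_C_MASK collects the port "change" status bits and shifts them
 * into the upper 16 bits, matching the wPortChange half of the 32-bit
 * port status word that the USB hub code expects when it polls for
 * root-hub status changes.
 */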
static void u132_hcd_delete(struct kref *kref)
{
	struct u132 *u132 = kref_to_u132(kref);
	struct platform_device *pdev = u132->platform_dev;
	struct usb_hcd *hcd = u132_to_hcd(u132);

	mutex_lock(&u132_module_lock);
	list_del_init(&u132->u132_list);
	mutex_unlock(&u132_module_lock);
	dev_warn(&u132->platform_dev->dev, "FREEING the hcd=%p and thus the u13"
		"2=%p going=%d pdev=%p\n", hcd, u132, u132->going, pdev);
	usb_put_hcd(hcd);
}
static inline void u132_u132_put_kref(struct u132 *u132)
{
	kref_put(&u132->kref, u132_hcd_delete);
}

static inline void u132_u132_init_kref(struct u132 *u132)
{
	kref_init(&u132->kref);
}

static void u132_udev_delete(struct kref *kref)
{
	struct u132_udev *udev = kref_to_u132_udev(kref);

	udev->udev_number = 0;
	udev->usb_device = NULL;
	udev->enumeration = 0;
}

static inline void u132_udev_put_kref(struct u132 *u132, struct u132_udev *udev)
{
	kref_put(&udev->kref, u132_udev_delete);
}

static inline void u132_udev_get_kref(struct u132 *u132, struct u132_udev *udev)
{
	kref_get(&udev->kref);
}

static inline void u132_udev_init_kref(struct u132 *u132,
	struct u132_udev *udev)
{
	kref_init(&udev->kref);
}

static inline void u132_ring_put_kref(struct u132 *u132, struct u132_ring *ring)
{
	kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_ring_requeue_work(struct u132 *u132, struct u132_ring *ring,
	unsigned int delta)
{
	if (delta > 0) {
		if (queue_delayed_work(workqueue, &ring->scheduler, delta))
			return;
	} else if (queue_delayed_work(workqueue, &ring->scheduler, 0))
		return;
	kref_put(&u132->kref, u132_hcd_delete);
}
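/*
 * Reference-counting convention for the deferred work above and below:
 * a work item only pins its u132 (or endpoint) while it is queued.
 * Queueing takes a kref, a successful cancel or a failed (re)queue
 * drops it, and the work function drops it itself once it finishes
 * without rescheduling.
 */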
static void u132_ring_queue_work(struct u132 *u132, struct u132_ring *ring,
	unsigned int delta)
{
	kref_get(&u132->kref);
	u132_ring_requeue_work(u132, ring, delta);
}

static void u132_ring_cancel_work(struct u132 *u132, struct u132_ring *ring)
{
	if (cancel_delayed_work(&ring->scheduler))
		kref_put(&u132->kref, u132_hcd_delete);
}
static void u132_endp_delete(struct kref *kref)
{
	struct u132_endp *endp = kref_to_u132_endp(kref);
	struct u132 *u132 = endp->u132;
	u8 usb_addr = endp->usb_addr;
	u8 usb_endp = endp->usb_endp;
	u8 address = u132->addr[usb_addr].address;
	struct u132_udev *udev = &u132->udev[address];
	u8 endp_number = endp->endp_number;
	struct usb_host_endpoint *hep = endp->hep;
	struct u132_ring *ring = endp->ring;
	struct list_head *head = &endp->endp_ring;

	if (endp == ring->curr_endp) {
		if (list_empty(head)) {
			ring->curr_endp = NULL;
		} else {
			struct u132_endp *next_endp = list_entry(head->next,
				struct u132_endp, endp_ring);
			ring->curr_endp = next_endp;
		}
	}
	list_del(head);
	udev->endp_number_in[usb_endp] = 0;
	u132_udev_put_kref(u132, udev);
	udev->endp_number_out[usb_endp] = 0;
	u132_udev_put_kref(u132, udev);
	u132->endp[endp_number - 1] = NULL;
	hep->hcpriv = NULL;
	kfree(endp);
	u132_u132_put_kref(u132);
}
static inline void u132_endp_put_kref(struct u132 *u132, struct u132_endp *endp)
{
	kref_put(&endp->kref, u132_endp_delete);
}

static inline void u132_endp_get_kref(struct u132 *u132, struct u132_endp *endp)
{
	kref_get(&endp->kref);
}

static inline void u132_endp_init_kref(struct u132 *u132,
	struct u132_endp *endp)
{
	kref_init(&endp->kref);
	kref_get(&u132->kref);
}

static void u132_endp_queue_work(struct u132 *u132, struct u132_endp *endp,
	unsigned int delta)
{
	if (queue_delayed_work(workqueue, &endp->scheduler, delta))
		kref_get(&endp->kref);
}

static void u132_endp_cancel_work(struct u132 *u132, struct u132_endp *endp)
{
	if (cancel_delayed_work(&endp->scheduler))
		kref_put(&endp->kref, u132_endp_delete);
}

static inline void u132_monitor_put_kref(struct u132 *u132)
{
	kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_monitor_queue_work(struct u132 *u132, unsigned int delta)
{
	if (queue_delayed_work(workqueue, &u132->monitor, delta))
		kref_get(&u132->kref);
}

static void u132_monitor_requeue_work(struct u132 *u132, unsigned int delta)
{
	if (!queue_delayed_work(workqueue, &u132->monitor, delta))
		kref_put(&u132->kref, u132_hcd_delete);
}

static void u132_monitor_cancel_work(struct u132 *u132)
{
	if (cancel_delayed_work(&u132->monitor))
		kref_put(&u132->kref, u132_hcd_delete);
}
static int read_roothub_info(struct u132 *u132)
{
	u32 revision;
	int retval;

	retval = u132_read_pcimem(u132, revision, &revision);
	if (retval) {
		dev_err(&u132->platform_dev->dev, "error %d accessing device co"
			"ntrol\n", retval);
		return retval;
	} else if ((revision & 0xFF) == 0x10) {
	} else if ((revision & 0xFF) == 0x11) {
	} else {
		dev_err(&u132->platform_dev->dev, "device revision is not valid"
			" %08X\n", revision);
		return -ENODEV;
	}
	retval = u132_read_pcimem(u132, control, &u132->hc_control);
	if (retval) {
		dev_err(&u132->platform_dev->dev, "error %d accessing device co"
			"ntrol\n", retval);
		return retval;
	}
	retval = u132_read_pcimem(u132, roothub.status,
		&u132->hc_roothub_status);
	if (retval) {
		dev_err(&u132->platform_dev->dev, "error %d accessing device re"
			"g roothub.status\n", retval);
		return retval;
	}
	retval = u132_read_pcimem(u132, roothub.a, &u132->hc_roothub_a);
	if (retval) {
		dev_err(&u132->platform_dev->dev, "error %d accessing device re"
			"g roothub.a\n", retval);
		return retval;
	}
	{
		int I = u132->num_ports;
		int i = 0;
		while (I-- > 0) {
			retval = u132_read_pcimem(u132, roothub.portstatus[i],
				&u132->hc_roothub_portstatus[i]);
			if (retval) {
				dev_err(&u132->platform_dev->dev, "error %d acc"
					"essing device roothub.portstatus[%d]\n",
					retval, i);
				return retval;
			} else
				i += 1;
		}
	}
	return 0;
}
static void u132_hcd_monitor_work(struct work_struct *work)
{
	struct u132 *u132 = container_of(work, struct u132, monitor.work);

	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		u132_monitor_put_kref(u132);
		return;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		u132_monitor_put_kref(u132);
		return;
	} else {
		int retval;
		mutex_lock(&u132->sw_lock);
		retval = read_roothub_info(u132);
		if (retval) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			u132_disable(u132);
			u132->going = 1;
			mutex_unlock(&u132->sw_lock);
			usb_hc_died(hcd);
			ftdi_elan_gone_away(u132->platform_dev);
			u132_monitor_put_kref(u132);
			return;
		} else {
			u132_monitor_requeue_work(u132, 500);
			mutex_unlock(&u132->sw_lock);
			return;
		}
	}
}
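/*
 * The monitor work item polls the root hub registers over the USB link
 * and reschedules itself every 500 jiffies; if a register read fails,
 * the adapter is assumed to be gone, the HCD is shut down and the
 * ftdi-elan layer is told the device has gone away instead of the
 * monitor being requeued.
 */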
static void u132_hcd_giveback_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	struct u132_ring *ring;
	unsigned long irqs;
	struct usb_hcd *hcd = u132_to_hcd(u132);

	urb->error_count = 0;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	endp->queue_next += 1;
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	} else {
		struct list_head *next = endp->urb_more.next;
		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
			urb_more);
		list_del(next);
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			urbq->urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(urbq);
	}
	mutex_lock(&u132->scheduler_lock);
	ring = endp->ring;
	u132_ring_cancel_work(u132, ring);
	u132_ring_queue_work(u132, ring, 0);
	mutex_unlock(&u132->scheduler_lock);
	u132_endp_put_kref(u132, endp);
	usb_hcd_giveback_urb(hcd, urb, status);
}
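/*
 * Giveback sequence: the URB is unlinked from its endpoint under the
 * queue spinlock, the circular queue is advanced (pulling in a spilled
 * URB from urb_more if one is waiting), the ring scheduler is kicked so
 * the next transfer can start, the per-URB endpoint reference is
 * dropped, and only then is the URB handed back to usbcore.
 */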
static void u132_hcd_forget_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	u132_endp_put_kref(u132, endp);
}
static void u132_hcd_abandon_urb(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	struct usb_hcd *hcd = u132_to_hcd(u132);

	urb->error_count = 0;
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	usb_hcd_unlink_urb_from_ep(hcd, urb);
	endp->queue_next += 1;
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
	} else {
		struct list_head *next = endp->urb_more.next;
		struct u132_urbq *urbq = list_entry(next, struct u132_urbq,
			urb_more);
		list_del(next);
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
			urbq->urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(urbq);
	}
	usb_hcd_giveback_urb(hcd, urb, status);
}
static inline int edset_input(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_input(u132->platform_dev, ring->number, endp,
		urb, address, endp->usb_endp, toggle_bits, callback);
}

static inline int edset_setup(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_setup(u132->platform_dev, ring->number, endp,
		urb, address, endp->usb_endp, toggle_bits, callback);
}

static inline int edset_single(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_single(u132->platform_dev, ring->number,
		endp, urb, address, endp->usb_endp, toggle_bits, callback);
}

static inline int edset_output(struct u132 *u132, struct u132_ring *ring,
	struct u132_endp *endp, struct urb *urb, u8 address, u8 toggle_bits,
	void (*callback) (void *endp, struct urb *urb, u8 *buf, int len,
	int toggle_bits, int error_count, int condition_code, int repeat_number,
	int halted, int skipped, int actual, int non_null))
{
	return usb_ftdi_elan_edset_output(u132->platform_dev, ring->number,
		endp, urb, address, endp->usb_endp, toggle_bits, callback);
}
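/*
 * The edset_input/setup/single/output wrappers hand one transfer at a
 * time to the ftdi-elan transport (usb_ftdi_elan_edset_*); the supplied
 * callback is invoked later with the completion details (length, data
 * toggle, OHCI condition code and so on), which is what drives the
 * u132_hcd_*_recv / *_sent state machines below.
 */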
/*
 * must not LOCK sw_lock
 */
622 static void u132_hcd_interrupt_recv(void *data
, struct urb
*urb
, u8
*buf
,
623 int len
, int toggle_bits
, int error_count
, int condition_code
,
624 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
626 struct u132_endp
*endp
= data
;
627 struct u132
*u132
= endp
->u132
;
628 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
629 struct u132_udev
*udev
= &u132
->udev
[address
];
630 mutex_lock(&u132
->scheduler_lock
);
631 if (u132
->going
> 1) {
632 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
634 mutex_unlock(&u132
->scheduler_lock
);
635 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
637 } else if (endp
->dequeueing
) {
638 endp
->dequeueing
= 0;
639 mutex_unlock(&u132
->scheduler_lock
);
640 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
642 } else if (u132
->going
> 0) {
643 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
645 mutex_unlock(&u132
->scheduler_lock
);
646 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
648 } else if (!urb
->unlinked
) {
649 struct u132_ring
*ring
= endp
->ring
;
650 u8
*u
= urb
->transfer_buffer
+ urb
->actual_length
;
657 urb
->actual_length
+= len
;
658 if ((condition_code
== TD_CC_NOERROR
) &&
659 (urb
->transfer_buffer_length
> urb
->actual_length
)) {
660 endp
->toggle_bits
= toggle_bits
;
661 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
663 if (urb
->actual_length
> 0) {
665 mutex_unlock(&u132
->scheduler_lock
);
666 retval
= edset_single(u132
, ring
, endp
, urb
,
667 address
, endp
->toggle_bits
,
668 u132_hcd_interrupt_recv
);
670 u132_hcd_giveback_urb(u132
, endp
, urb
,
675 endp
->jiffies
= jiffies
+
676 msecs_to_jiffies(urb
->interval
);
677 u132_ring_cancel_work(u132
, ring
);
678 u132_ring_queue_work(u132
, ring
, 0);
679 mutex_unlock(&u132
->scheduler_lock
);
680 u132_endp_put_kref(u132
, endp
);
683 } else if ((condition_code
== TD_DATAUNDERRUN
) &&
684 ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)) {
685 endp
->toggle_bits
= toggle_bits
;
686 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
688 mutex_unlock(&u132
->scheduler_lock
);
689 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
692 if (condition_code
== TD_CC_NOERROR
) {
693 endp
->toggle_bits
= toggle_bits
;
694 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
696 } else if (condition_code
== TD_CC_STALL
) {
697 endp
->toggle_bits
= 0x2;
698 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
701 endp
->toggle_bits
= 0x2;
702 usb_settoggle(udev
->usb_device
, endp
->usb_endp
,
704 dev_err(&u132
->platform_dev
->dev
, "urb=%p givin"
705 "g back INTERRUPT %s\n", urb
,
706 cc_to_text
[condition_code
]);
708 mutex_unlock(&u132
->scheduler_lock
);
709 u132_hcd_giveback_urb(u132
, endp
, urb
,
710 cc_to_error
[condition_code
]);
714 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
715 "unlinked=%d\n", urb
, urb
->unlinked
);
716 mutex_unlock(&u132
->scheduler_lock
);
717 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
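/*
 * Each completion callback above and below follows the same pattern:
 * take scheduler_lock, then either forget the URB (adapter already
 * removed, going > 1), give it back with -EINTR (endpoint is being
 * dequeued), give it back with -ENODEV (removal in progress), continue
 * or finish the transfer (URB still linked), or give it back unchanged
 * when it has already been unlinked.  The callbacks run without
 * sw_lock, as noted above, and always drop scheduler_lock before
 * calling u132_hcd_giveback_urb().
 */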
722 static void u132_hcd_bulk_output_sent(void *data
, struct urb
*urb
, u8
*buf
,
723 int len
, int toggle_bits
, int error_count
, int condition_code
,
724 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
726 struct u132_endp
*endp
= data
;
727 struct u132
*u132
= endp
->u132
;
728 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
729 mutex_lock(&u132
->scheduler_lock
);
730 if (u132
->going
> 1) {
731 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
733 mutex_unlock(&u132
->scheduler_lock
);
734 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
736 } else if (endp
->dequeueing
) {
737 endp
->dequeueing
= 0;
738 mutex_unlock(&u132
->scheduler_lock
);
739 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
741 } else if (u132
->going
> 0) {
742 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
744 mutex_unlock(&u132
->scheduler_lock
);
745 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
747 } else if (!urb
->unlinked
) {
748 struct u132_ring
*ring
= endp
->ring
;
749 urb
->actual_length
+= len
;
750 endp
->toggle_bits
= toggle_bits
;
751 if (urb
->transfer_buffer_length
> urb
->actual_length
) {
753 mutex_unlock(&u132
->scheduler_lock
);
754 retval
= edset_output(u132
, ring
, endp
, urb
, address
,
755 endp
->toggle_bits
, u132_hcd_bulk_output_sent
);
757 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
760 mutex_unlock(&u132
->scheduler_lock
);
761 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
765 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
766 "unlinked=%d\n", urb
, urb
->unlinked
);
767 mutex_unlock(&u132
->scheduler_lock
);
768 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
773 static void u132_hcd_bulk_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
774 int len
, int toggle_bits
, int error_count
, int condition_code
,
775 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
777 struct u132_endp
*endp
= data
;
778 struct u132
*u132
= endp
->u132
;
779 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
780 struct u132_udev
*udev
= &u132
->udev
[address
];
781 mutex_lock(&u132
->scheduler_lock
);
782 if (u132
->going
> 1) {
783 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
785 mutex_unlock(&u132
->scheduler_lock
);
786 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
788 } else if (endp
->dequeueing
) {
789 endp
->dequeueing
= 0;
790 mutex_unlock(&u132
->scheduler_lock
);
791 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
793 } else if (u132
->going
> 0) {
794 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
796 mutex_unlock(&u132
->scheduler_lock
);
797 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
799 } else if (!urb
->unlinked
) {
800 struct u132_ring
*ring
= endp
->ring
;
801 u8
*u
= urb
->transfer_buffer
+ urb
->actual_length
;
808 urb
->actual_length
+= len
;
809 if ((condition_code
== TD_CC_NOERROR
) &&
810 (urb
->transfer_buffer_length
> urb
->actual_length
)) {
812 endp
->toggle_bits
= toggle_bits
;
813 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
815 mutex_unlock(&u132
->scheduler_lock
);
816 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
817 ring
->number
, endp
, urb
, address
,
818 endp
->usb_endp
, endp
->toggle_bits
,
819 u132_hcd_bulk_input_recv
);
821 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
823 } else if (condition_code
== TD_CC_NOERROR
) {
824 endp
->toggle_bits
= toggle_bits
;
825 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
827 mutex_unlock(&u132
->scheduler_lock
);
828 u132_hcd_giveback_urb(u132
, endp
, urb
,
829 cc_to_error
[condition_code
]);
831 } else if ((condition_code
== TD_DATAUNDERRUN
) &&
832 ((urb
->transfer_flags
& URB_SHORT_NOT_OK
) == 0)) {
833 endp
->toggle_bits
= toggle_bits
;
834 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
836 mutex_unlock(&u132
->scheduler_lock
);
837 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
839 } else if (condition_code
== TD_DATAUNDERRUN
) {
840 endp
->toggle_bits
= toggle_bits
;
841 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0,
843 dev_warn(&u132
->platform_dev
->dev
, "urb=%p(SHORT NOT OK"
844 ") giving back BULK IN %s\n", urb
,
845 cc_to_text
[condition_code
]);
846 mutex_unlock(&u132
->scheduler_lock
);
847 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
849 } else if (condition_code
== TD_CC_STALL
) {
850 endp
->toggle_bits
= 0x2;
851 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0, 0);
852 mutex_unlock(&u132
->scheduler_lock
);
853 u132_hcd_giveback_urb(u132
, endp
, urb
,
854 cc_to_error
[condition_code
]);
857 endp
->toggle_bits
= 0x2;
858 usb_settoggle(udev
->usb_device
, endp
->usb_endp
, 0, 0);
859 dev_err(&u132
->platform_dev
->dev
, "urb=%p giving back B"
860 "ULK IN code=%d %s\n", urb
, condition_code
,
861 cc_to_text
[condition_code
]);
862 mutex_unlock(&u132
->scheduler_lock
);
863 u132_hcd_giveback_urb(u132
, endp
, urb
,
864 cc_to_error
[condition_code
]);
868 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
869 "unlinked=%d\n", urb
, urb
->unlinked
);
870 mutex_unlock(&u132
->scheduler_lock
);
871 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
876 static void u132_hcd_configure_empty_sent(void *data
, struct urb
*urb
, u8
*buf
,
877 int len
, int toggle_bits
, int error_count
, int condition_code
,
878 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
880 struct u132_endp
*endp
= data
;
881 struct u132
*u132
= endp
->u132
;
882 mutex_lock(&u132
->scheduler_lock
);
883 if (u132
->going
> 1) {
884 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
886 mutex_unlock(&u132
->scheduler_lock
);
887 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
889 } else if (endp
->dequeueing
) {
890 endp
->dequeueing
= 0;
891 mutex_unlock(&u132
->scheduler_lock
);
892 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
894 } else if (u132
->going
> 0) {
895 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
897 mutex_unlock(&u132
->scheduler_lock
);
898 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
900 } else if (!urb
->unlinked
) {
901 mutex_unlock(&u132
->scheduler_lock
);
902 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
905 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
906 "unlinked=%d\n", urb
, urb
->unlinked
);
907 mutex_unlock(&u132
->scheduler_lock
);
908 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
913 static void u132_hcd_configure_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
914 int len
, int toggle_bits
, int error_count
, int condition_code
,
915 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
917 struct u132_endp
*endp
= data
;
918 struct u132
*u132
= endp
->u132
;
919 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
920 mutex_lock(&u132
->scheduler_lock
);
921 if (u132
->going
> 1) {
922 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
924 mutex_unlock(&u132
->scheduler_lock
);
925 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
927 } else if (endp
->dequeueing
) {
928 endp
->dequeueing
= 0;
929 mutex_unlock(&u132
->scheduler_lock
);
930 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
932 } else if (u132
->going
> 0) {
933 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
935 mutex_unlock(&u132
->scheduler_lock
);
936 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
938 } else if (!urb
->unlinked
) {
939 struct u132_ring
*ring
= endp
->ring
;
940 u8
*u
= urb
->transfer_buffer
;
947 urb
->actual_length
= len
;
948 if ((condition_code
== TD_CC_NOERROR
) || ((condition_code
==
949 TD_DATAUNDERRUN
) && ((urb
->transfer_flags
&
950 URB_SHORT_NOT_OK
) == 0))) {
952 mutex_unlock(&u132
->scheduler_lock
);
953 retval
= usb_ftdi_elan_edset_empty(u132
->platform_dev
,
954 ring
->number
, endp
, urb
, address
,
956 u132_hcd_configure_empty_sent
);
958 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
960 } else if (condition_code
== TD_CC_STALL
) {
961 mutex_unlock(&u132
->scheduler_lock
);
962 dev_warn(&u132
->platform_dev
->dev
, "giving back SETUP I"
963 "NPUT STALL urb %p\n", urb
);
964 u132_hcd_giveback_urb(u132
, endp
, urb
,
965 cc_to_error
[condition_code
]);
968 mutex_unlock(&u132
->scheduler_lock
);
969 dev_err(&u132
->platform_dev
->dev
, "giving back SETUP IN"
970 "PUT %s urb %p\n", cc_to_text
[condition_code
],
972 u132_hcd_giveback_urb(u132
, endp
, urb
,
973 cc_to_error
[condition_code
]);
977 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
978 "unlinked=%d\n", urb
, urb
->unlinked
);
979 mutex_unlock(&u132
->scheduler_lock
);
980 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
985 static void u132_hcd_configure_empty_recv(void *data
, struct urb
*urb
, u8
*buf
,
986 int len
, int toggle_bits
, int error_count
, int condition_code
,
987 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
989 struct u132_endp
*endp
= data
;
990 struct u132
*u132
= endp
->u132
;
991 mutex_lock(&u132
->scheduler_lock
);
992 if (u132
->going
> 1) {
993 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
995 mutex_unlock(&u132
->scheduler_lock
);
996 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
998 } else if (endp
->dequeueing
) {
999 endp
->dequeueing
= 0;
1000 mutex_unlock(&u132
->scheduler_lock
);
1001 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1003 } else if (u132
->going
> 0) {
1004 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1006 mutex_unlock(&u132
->scheduler_lock
);
1007 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1009 } else if (!urb
->unlinked
) {
1010 mutex_unlock(&u132
->scheduler_lock
);
1011 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1014 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1015 "unlinked=%d\n", urb
, urb
->unlinked
);
1016 mutex_unlock(&u132
->scheduler_lock
);
1017 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1022 static void u132_hcd_configure_setup_sent(void *data
, struct urb
*urb
, u8
*buf
,
1023 int len
, int toggle_bits
, int error_count
, int condition_code
,
1024 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1026 struct u132_endp
*endp
= data
;
1027 struct u132
*u132
= endp
->u132
;
1028 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1029 mutex_lock(&u132
->scheduler_lock
);
1030 if (u132
->going
> 1) {
1031 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1033 mutex_unlock(&u132
->scheduler_lock
);
1034 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1036 } else if (endp
->dequeueing
) {
1037 endp
->dequeueing
= 0;
1038 mutex_unlock(&u132
->scheduler_lock
);
1039 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1041 } else if (u132
->going
> 0) {
1042 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1044 mutex_unlock(&u132
->scheduler_lock
);
1045 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1047 } else if (!urb
->unlinked
) {
1048 if (usb_pipein(urb
->pipe
)) {
1050 struct u132_ring
*ring
= endp
->ring
;
1051 mutex_unlock(&u132
->scheduler_lock
);
1052 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1053 ring
->number
, endp
, urb
, address
,
1055 u132_hcd_configure_input_recv
);
1057 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1061 struct u132_ring
*ring
= endp
->ring
;
1062 mutex_unlock(&u132
->scheduler_lock
);
1063 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1064 ring
->number
, endp
, urb
, address
,
1066 u132_hcd_configure_empty_recv
);
1068 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1072 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1073 "unlinked=%d\n", urb
, urb
->unlinked
);
1074 mutex_unlock(&u132
->scheduler_lock
);
1075 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1080 static void u132_hcd_enumeration_empty_recv(void *data
, struct urb
*urb
,
1081 u8
*buf
, int len
, int toggle_bits
, int error_count
, int condition_code
,
1082 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1084 struct u132_endp
*endp
= data
;
1085 struct u132
*u132
= endp
->u132
;
1086 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1087 struct u132_udev
*udev
= &u132
->udev
[address
];
1088 mutex_lock(&u132
->scheduler_lock
);
1089 if (u132
->going
> 1) {
1090 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1092 mutex_unlock(&u132
->scheduler_lock
);
1093 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1095 } else if (endp
->dequeueing
) {
1096 endp
->dequeueing
= 0;
1097 mutex_unlock(&u132
->scheduler_lock
);
1098 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1100 } else if (u132
->going
> 0) {
1101 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1103 mutex_unlock(&u132
->scheduler_lock
);
1104 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1106 } else if (!urb
->unlinked
) {
1107 u132
->addr
[0].address
= 0;
1108 endp
->usb_addr
= udev
->usb_addr
;
1109 mutex_unlock(&u132
->scheduler_lock
);
1110 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1113 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1114 "unlinked=%d\n", urb
, urb
->unlinked
);
1115 mutex_unlock(&u132
->scheduler_lock
);
1116 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1121 static void u132_hcd_enumeration_address_sent(void *data
, struct urb
*urb
,
1122 u8
*buf
, int len
, int toggle_bits
, int error_count
, int condition_code
,
1123 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1125 struct u132_endp
*endp
= data
;
1126 struct u132
*u132
= endp
->u132
;
1127 mutex_lock(&u132
->scheduler_lock
);
1128 if (u132
->going
> 1) {
1129 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1131 mutex_unlock(&u132
->scheduler_lock
);
1132 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1134 } else if (endp
->dequeueing
) {
1135 endp
->dequeueing
= 0;
1136 mutex_unlock(&u132
->scheduler_lock
);
1137 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1139 } else if (u132
->going
> 0) {
1140 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1142 mutex_unlock(&u132
->scheduler_lock
);
1143 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1145 } else if (!urb
->unlinked
) {
1147 struct u132_ring
*ring
= endp
->ring
;
1148 mutex_unlock(&u132
->scheduler_lock
);
1149 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1150 ring
->number
, endp
, urb
, 0, endp
->usb_endp
, 0,
1151 u132_hcd_enumeration_empty_recv
);
1153 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1156 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1157 "unlinked=%d\n", urb
, urb
->unlinked
);
1158 mutex_unlock(&u132
->scheduler_lock
);
1159 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1164 static void u132_hcd_initial_empty_sent(void *data
, struct urb
*urb
, u8
*buf
,
1165 int len
, int toggle_bits
, int error_count
, int condition_code
,
1166 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1168 struct u132_endp
*endp
= data
;
1169 struct u132
*u132
= endp
->u132
;
1170 mutex_lock(&u132
->scheduler_lock
);
1171 if (u132
->going
> 1) {
1172 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1174 mutex_unlock(&u132
->scheduler_lock
);
1175 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1177 } else if (endp
->dequeueing
) {
1178 endp
->dequeueing
= 0;
1179 mutex_unlock(&u132
->scheduler_lock
);
1180 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1182 } else if (u132
->going
> 0) {
1183 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1185 mutex_unlock(&u132
->scheduler_lock
);
1186 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1188 } else if (!urb
->unlinked
) {
1189 mutex_unlock(&u132
->scheduler_lock
);
1190 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1193 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1194 "unlinked=%d\n", urb
, urb
->unlinked
);
1195 mutex_unlock(&u132
->scheduler_lock
);
1196 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1201 static void u132_hcd_initial_input_recv(void *data
, struct urb
*urb
, u8
*buf
,
1202 int len
, int toggle_bits
, int error_count
, int condition_code
,
1203 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1205 struct u132_endp
*endp
= data
;
1206 struct u132
*u132
= endp
->u132
;
1207 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1208 mutex_lock(&u132
->scheduler_lock
);
1209 if (u132
->going
> 1) {
1210 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1212 mutex_unlock(&u132
->scheduler_lock
);
1213 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1215 } else if (endp
->dequeueing
) {
1216 endp
->dequeueing
= 0;
1217 mutex_unlock(&u132
->scheduler_lock
);
1218 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1220 } else if (u132
->going
> 0) {
1221 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1223 mutex_unlock(&u132
->scheduler_lock
);
1224 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1226 } else if (!urb
->unlinked
) {
1228 struct u132_ring
*ring
= endp
->ring
;
1229 u8
*u
= urb
->transfer_buffer
;
1236 urb
->actual_length
= len
;
1237 mutex_unlock(&u132
->scheduler_lock
);
1238 retval
= usb_ftdi_elan_edset_empty(u132
->platform_dev
,
1239 ring
->number
, endp
, urb
, address
, endp
->usb_endp
, 0x3,
1240 u132_hcd_initial_empty_sent
);
1242 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1245 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1246 "unlinked=%d\n", urb
, urb
->unlinked
);
1247 mutex_unlock(&u132
->scheduler_lock
);
1248 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
1253 static void u132_hcd_initial_setup_sent(void *data
, struct urb
*urb
, u8
*buf
,
1254 int len
, int toggle_bits
, int error_count
, int condition_code
,
1255 int repeat_number
, int halted
, int skipped
, int actual
, int non_null
)
1257 struct u132_endp
*endp
= data
;
1258 struct u132
*u132
= endp
->u132
;
1259 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1260 mutex_lock(&u132
->scheduler_lock
);
1261 if (u132
->going
> 1) {
1262 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1264 mutex_unlock(&u132
->scheduler_lock
);
1265 u132_hcd_forget_urb(u132
, endp
, urb
, -ENODEV
);
1267 } else if (endp
->dequeueing
) {
1268 endp
->dequeueing
= 0;
1269 mutex_unlock(&u132
->scheduler_lock
);
1270 u132_hcd_giveback_urb(u132
, endp
, urb
, -EINTR
);
1272 } else if (u132
->going
> 0) {
1273 dev_err(&u132
->platform_dev
->dev
, "device is being removed "
1275 mutex_unlock(&u132
->scheduler_lock
);
1276 u132_hcd_giveback_urb(u132
, endp
, urb
, -ENODEV
);
1278 } else if (!urb
->unlinked
) {
1280 struct u132_ring
*ring
= endp
->ring
;
1281 mutex_unlock(&u132
->scheduler_lock
);
1282 retval
= usb_ftdi_elan_edset_input(u132
->platform_dev
,
1283 ring
->number
, endp
, urb
, address
, endp
->usb_endp
, 0,
1284 u132_hcd_initial_input_recv
);
1286 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1289 dev_err(&u132
->platform_dev
->dev
, "CALLBACK called urb=%p "
1290 "unlinked=%d\n", urb
, urb
->unlinked
);
1291 mutex_unlock(&u132
->scheduler_lock
);
1292 u132_hcd_giveback_urb(u132
, endp
, urb
, 0);
/*
 * this work function is only executed from the work queue
 */
1301 static void u132_hcd_ring_work_scheduler(struct work_struct
*work
)
1303 struct u132_ring
*ring
=
1304 container_of(work
, struct u132_ring
, scheduler
.work
);
1305 struct u132
*u132
= ring
->u132
;
1306 mutex_lock(&u132
->scheduler_lock
);
1308 mutex_unlock(&u132
->scheduler_lock
);
1309 u132_ring_put_kref(u132
, ring
);
1311 } else if (ring
->curr_endp
) {
1312 struct u132_endp
*endp
, *last_endp
= ring
->curr_endp
;
1313 unsigned long wakeup
= 0;
1314 list_for_each_entry(endp
, &last_endp
->endp_ring
, endp_ring
) {
1315 if (endp
->queue_next
== endp
->queue_last
) {
1316 } else if ((endp
->delayed
== 0)
1317 || time_after_eq(jiffies
, endp
->jiffies
)) {
1318 ring
->curr_endp
= endp
;
1319 u132_endp_cancel_work(u132
, last_endp
);
1320 u132_endp_queue_work(u132
, last_endp
, 0);
1321 mutex_unlock(&u132
->scheduler_lock
);
1322 u132_ring_put_kref(u132
, ring
);
1325 unsigned long delta
= endp
->jiffies
- jiffies
;
1330 if (last_endp
->queue_next
== last_endp
->queue_last
) {
1331 } else if ((last_endp
->delayed
== 0) || time_after_eq(jiffies
,
1332 last_endp
->jiffies
)) {
1333 u132_endp_cancel_work(u132
, last_endp
);
1334 u132_endp_queue_work(u132
, last_endp
, 0);
1335 mutex_unlock(&u132
->scheduler_lock
);
1336 u132_ring_put_kref(u132
, ring
);
1339 unsigned long delta
= last_endp
->jiffies
- jiffies
;
1344 u132_ring_requeue_work(u132
, ring
, wakeup
);
1345 mutex_unlock(&u132
->scheduler_lock
);
1348 mutex_unlock(&u132
->scheduler_lock
);
1349 u132_ring_put_kref(u132
, ring
);
1353 mutex_unlock(&u132
->scheduler_lock
);
1354 u132_ring_put_kref(u132
, ring
);
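/*
 * The ring work scheduler above walks the endpoints linked on endp_ring
 * starting from curr_endp, hands the first endpoint that has queued
 * URBs and whose delay has expired over to the endpoint scheduler, and
 * otherwise requeues itself to wake up when the nearest endp->jiffies
 * deadline falls due.
 */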
1359 static void u132_hcd_endp_work_scheduler(struct work_struct
*work
)
1361 struct u132_ring
*ring
;
1362 struct u132_endp
*endp
=
1363 container_of(work
, struct u132_endp
, scheduler
.work
);
1364 struct u132
*u132
= endp
->u132
;
1365 mutex_lock(&u132
->scheduler_lock
);
1367 if (endp
->edset_flush
) {
1368 endp
->edset_flush
= 0;
1369 if (endp
->dequeueing
)
1370 usb_ftdi_elan_edset_flush(u132
->platform_dev
,
1371 ring
->number
, endp
);
1372 mutex_unlock(&u132
->scheduler_lock
);
1373 u132_endp_put_kref(u132
, endp
);
1375 } else if (endp
->active
) {
1376 mutex_unlock(&u132
->scheduler_lock
);
1377 u132_endp_put_kref(u132
, endp
);
1379 } else if (ring
->in_use
) {
1380 mutex_unlock(&u132
->scheduler_lock
);
1381 u132_endp_put_kref(u132
, endp
);
1383 } else if (endp
->queue_next
== endp
->queue_last
) {
1384 mutex_unlock(&u132
->scheduler_lock
);
1385 u132_endp_put_kref(u132
, endp
);
1387 } else if (endp
->pipetype
== PIPE_INTERRUPT
) {
1388 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1390 mutex_unlock(&u132
->scheduler_lock
);
1391 u132_endp_put_kref(u132
, endp
);
1395 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1398 ring
->curr_endp
= endp
;
1400 mutex_unlock(&u132
->scheduler_lock
);
1401 retval
= edset_single(u132
, ring
, endp
, urb
, address
,
1402 endp
->toggle_bits
, u132_hcd_interrupt_recv
);
1404 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1407 } else if (endp
->pipetype
== PIPE_CONTROL
) {
1408 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1410 mutex_unlock(&u132
->scheduler_lock
);
1411 u132_endp_put_kref(u132
, endp
);
1413 } else if (address
== 0) {
1415 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1418 ring
->curr_endp
= endp
;
1420 mutex_unlock(&u132
->scheduler_lock
);
1421 retval
= edset_setup(u132
, ring
, endp
, urb
, address
,
1422 0x2, u132_hcd_initial_setup_sent
);
1424 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1426 } else if (endp
->usb_addr
== 0) {
1428 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1431 ring
->curr_endp
= endp
;
1433 mutex_unlock(&u132
->scheduler_lock
);
1434 retval
= edset_setup(u132
, ring
, endp
, urb
, 0, 0x2,
1435 u132_hcd_enumeration_address_sent
);
1437 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1441 struct urb
*urb
= endp
->urb_list
[ENDP_QUEUE_MASK
&
1443 address
= u132
->addr
[endp
->usb_addr
].address
;
1445 ring
->curr_endp
= endp
;
1447 mutex_unlock(&u132
->scheduler_lock
);
1448 retval
= edset_setup(u132
, ring
, endp
, urb
, address
,
1449 0x2, u132_hcd_configure_setup_sent
);
1451 u132_hcd_giveback_urb(u132
, endp
, urb
, retval
);
1456 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1458 mutex_unlock(&u132
->scheduler_lock
);
1459 u132_endp_put_kref(u132
, endp
);
1463 struct urb
*urb
= endp
->urb_list
[
1464 ENDP_QUEUE_MASK
& endp
->queue_next
];
1466 ring
->curr_endp
= endp
;
1468 mutex_unlock(&u132
->scheduler_lock
);
1469 retval
= edset_input(u132
, ring
, endp
, urb
,
1470 address
, endp
->toggle_bits
,
1471 u132_hcd_bulk_input_recv
);
1474 u132_hcd_giveback_urb(u132
, endp
, urb
,
1478 } else { /* output pipe */
1479 u8 address
= u132
->addr
[endp
->usb_addr
].address
;
1481 mutex_unlock(&u132
->scheduler_lock
);
1482 u132_endp_put_kref(u132
, endp
);
1486 struct urb
*urb
= endp
->urb_list
[
1487 ENDP_QUEUE_MASK
& endp
->queue_next
];
1489 ring
->curr_endp
= endp
;
1491 mutex_unlock(&u132
->scheduler_lock
);
1492 retval
= edset_output(u132
, ring
, endp
, urb
,
1493 address
, endp
->toggle_bits
,
1494 u132_hcd_bulk_output_sent
);
1497 u132_hcd_giveback_urb(u132
, endp
, urb
,
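/*
 * The endpoint work scheduler above dispatches the URB at the head of
 * the endpoint queue according to pipe type: interrupt transfers use
 * edset_single(), control transfers start with edset_setup() (with
 * special cases for address 0 during enumeration), and bulk transfers
 * use edset_input() or edset_output() depending on direction.
 */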
static void port_power(struct u132 *u132, int pn, int is_on)
{
	u132->port[pn].power = is_on;
}

static void u132_power(struct u132 *u132, int is_on)
{
	struct usb_hcd *hcd = u132_to_hcd(u132);
	/* hub is inactive unless the port is powered */
	if (is_on) {
		if (u132->power)
			return;
		u132->power = 1;
	} else {
		u132->power = 0;
		hcd->state = HC_STATE_HALT;
	}
}
static int u132_periodic_reinit(struct u132 *u132)
{
	int retval;
	u32 fi = u132->hc_fminterval & 0x03fff;
	u32 fit;
	u32 fminterval;

	retval = u132_read_pcimem(u132, fminterval, &fminterval);
	if (retval)
		return retval;
	fit = fminterval & FIT;
	retval = u132_write_pcimem(u132, fminterval,
		(fit ^ FIT) | u132->hc_fminterval);
	if (retval)
		return retval;
	return u132_write_pcimem(u132, periodicstart,
		((9 * fi) / 10) & 0x3fff);
}
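/*
 * PeriodicStart is programmed to 90% of the frame interval
 * ((9 * fi) / 10), the value recommended by the OHCI specification:
 * once HcFmRemaining counts down to that level (i.e. about 10% of the
 * frame has elapsed), the controller gives priority to the periodic
 * (interrupt/isochronous) lists for the rest of the frame.
 */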
static char *hcfs2string(int state)
{
	switch (state) {
	case OHCI_USB_RESET:
		return "reset";
	case OHCI_USB_RESUME:
		return "resume";
	case OHCI_USB_OPER:
		return "operational";
	case OHCI_USB_SUSPEND:
		return "suspend";
	}
	return "?";
}
1560 static int u132_init(struct u132
*u132
)
1565 u132
->next_statechange
= jiffies
;
1566 retval
= u132_write_pcimem(u132
, intrdisable
, OHCI_INTR_MIE
);
1569 retval
= u132_read_pcimem(u132
, control
, &control
);
1572 if (u132
->num_ports
== 0) {
1574 retval
= u132_read_pcimem(u132
, roothub
.a
, &rh_a
);
1577 u132
->num_ports
= rh_a
& RH_A_NDP
;
1578 retval
= read_roothub_info(u132
);
1582 if (u132
->num_ports
> MAX_U132_PORTS
)
/* Start an OHCI controller, set the BUS operational
 * resets USB and controller
 */
1593 static int u132_run(struct u132
*u132
)
1602 int mask
= OHCI_INTR_INIT
;
1603 int first
= u132
->hc_fminterval
== 0;
1605 int reset_timeout
= 30; /* ... allow extra time */
1609 retval
= u132_read_pcimem(u132
, fminterval
, &temp
);
1612 u132
->hc_fminterval
= temp
& 0x3fff;
1613 u132
->hc_fminterval
|= FSMP(u132
->hc_fminterval
) << 16;
1615 retval
= u132_read_pcimem(u132
, control
, &u132
->hc_control
);
1618 dev_info(&u132
->platform_dev
->dev
, "resetting from state '%s', control "
1619 "= %08X\n", hcfs2string(u132
->hc_control
& OHCI_CTRL_HCFS
),
1621 switch (u132
->hc_control
& OHCI_CTRL_HCFS
) {
1625 case OHCI_USB_SUSPEND
:
1626 case OHCI_USB_RESUME
:
1627 u132
->hc_control
&= OHCI_CTRL_RWC
;
1628 u132
->hc_control
|= OHCI_USB_RESUME
;
1632 u132
->hc_control
&= OHCI_CTRL_RWC
;
1633 u132
->hc_control
|= OHCI_USB_RESET
;
1637 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1640 retval
= u132_read_pcimem(u132
, control
, &control
);
1644 retval
= u132_read_pcimem(u132
, roothub
.a
, &roothub_a
);
1647 if (!(roothub_a
& RH_A_NPS
)) {
1648 int temp
; /* power down each port */
1649 for (temp
= 0; temp
< u132
->num_ports
; temp
++) {
1650 retval
= u132_write_pcimem(u132
,
1651 roothub
.portstatus
[temp
], RH_PS_LSDA
);
1656 retval
= u132_read_pcimem(u132
, control
, &control
);
1660 retval
= u132_read_pcimem(u132
, cmdstatus
, &status
);
1663 retval
= u132_write_pcimem(u132
, cmdstatus
, OHCI_HCR
);
1667 retval
= u132_read_pcimem(u132
, cmdstatus
, &status
);
1670 if (0 != (status
& OHCI_HCR
)) {
1671 if (--reset_timeout
== 0) {
1672 dev_err(&u132
->platform_dev
->dev
, "USB HC reset"
1681 if (u132
->flags
& OHCI_QUIRK_INITRESET
) {
1682 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1685 retval
= u132_read_pcimem(u132
, control
, &control
);
1689 retval
= u132_write_pcimem(u132
, ed_controlhead
, 0x00000000);
1692 retval
= u132_write_pcimem(u132
, ed_bulkhead
, 0x11000000);
1695 retval
= u132_write_pcimem(u132
, hcca
, 0x00000000);
1698 retval
= u132_periodic_reinit(u132
);
1701 retval
= u132_read_pcimem(u132
, fminterval
, &fminterval
);
1704 retval
= u132_read_pcimem(u132
, periodicstart
, &periodicstart
);
1707 if (0 == (fminterval
& 0x3fff0000) || 0 == periodicstart
) {
1708 if (!(u132
->flags
& OHCI_QUIRK_INITRESET
)) {
1709 u132
->flags
|= OHCI_QUIRK_INITRESET
;
1712 dev_err(&u132
->platform_dev
->dev
, "init err(%08x %04x)"
1713 "\n", fminterval
, periodicstart
);
1714 } /* start controller operations */
1715 u132
->hc_control
&= OHCI_CTRL_RWC
;
1716 u132
->hc_control
|= OHCI_CONTROL_INIT
| OHCI_CTRL_BLE
| OHCI_USB_OPER
;
1717 retval
= u132_write_pcimem(u132
, control
, u132
->hc_control
);
1720 retval
= u132_write_pcimem(u132
, cmdstatus
, OHCI_BLF
);
1723 retval
= u132_read_pcimem(u132
, cmdstatus
, &cmdstatus
);
1726 retval
= u132_read_pcimem(u132
, control
, &control
);
1729 u132_to_hcd(u132
)->state
= HC_STATE_RUNNING
;
1730 retval
= u132_write_pcimem(u132
, roothub
.status
, RH_HS_DRWE
);
1733 retval
= u132_write_pcimem(u132
, intrstatus
, mask
);
1736 retval
= u132_write_pcimem(u132
, intrdisable
,
1737 OHCI_INTR_MIE
| OHCI_INTR_OC
| OHCI_INTR_RHSC
| OHCI_INTR_FNO
|
1738 OHCI_INTR_UE
| OHCI_INTR_RD
| OHCI_INTR_SF
| OHCI_INTR_WDH
|
1741 return retval
; /* handle root hub init quirks ... */
1742 retval
= u132_read_pcimem(u132
, roothub
.a
, &roothub_a
);
1745 roothub_a
&= ~(RH_A_PSM
| RH_A_OCPM
);
1746 if (u132
->flags
& OHCI_QUIRK_SUPERIO
) {
1747 roothub_a
|= RH_A_NOCP
;
1748 roothub_a
&= ~(RH_A_POTPGT
| RH_A_NPS
);
1749 retval
= u132_write_pcimem(u132
, roothub
.a
, roothub_a
);
1752 } else if ((u132
->flags
& OHCI_QUIRK_AMD756
) || distrust_firmware
) {
1753 roothub_a
|= RH_A_NPS
;
1754 retval
= u132_write_pcimem(u132
, roothub
.a
, roothub_a
);
1758 retval
= u132_write_pcimem(u132
, roothub
.status
, RH_HS_LPSC
);
1761 retval
= u132_write_pcimem(u132
, roothub
.b
,
1762 (roothub_a
& RH_A_NPS
) ? 0 : RH_B_PPCM
);
1765 retval
= u132_read_pcimem(u132
, control
, &control
);
1768 mdelay((roothub_a
>> 23) & 0x1fe);
1769 u132_to_hcd(u132
)->state
= HC_STATE_RUNNING
;
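/*
 * u132_run() above mirrors the usual OHCI bring-up: save fminterval,
 * nudge the HCFS state towards reset/resume as appropriate, issue the
 * host controller reset (OHCI_HCR) and poll until it completes,
 * reprogram fminterval/periodicstart, switch HCFS to operational, mask
 * the OHCI interrupts (completions are polled over the USB link
 * instead), apply the root-hub power-switching quirks (including the
 * distrust_firmware override), and wait the POTPGT time
 * ((roothub_a >> 23) & 0x1fe milliseconds) before marking the HC as
 * running.
 */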
1773 static void u132_hcd_stop(struct usb_hcd
*hcd
)
1775 struct u132
*u132
= hcd_to_u132(hcd
);
1776 if (u132
->going
> 1) {
1777 dev_err(&u132
->platform_dev
->dev
, "u132 device %p(hcd=%p) has b"
1778 "een removed %d\n", u132
, hcd
, u132
->going
);
1779 } else if (u132
->going
> 0) {
1780 dev_err(&u132
->platform_dev
->dev
, "device hcd=%p is being remov"
1783 mutex_lock(&u132
->sw_lock
);
1785 u132_power(u132
, 0);
1786 mutex_unlock(&u132
->sw_lock
);
1790 static int u132_hcd_start(struct usb_hcd
*hcd
)
1792 struct u132
*u132
= hcd_to_u132(hcd
);
1793 if (u132
->going
> 1) {
1794 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1797 } else if (u132
->going
> 0) {
1798 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
1800 } else if (hcd
->self
.controller
) {
1802 struct platform_device
*pdev
=
1803 to_platform_device(hcd
->self
.controller
);
1804 u16 vendor
= ((struct u132_platform_data
*)
1805 dev_get_platdata(&pdev
->dev
))->vendor
;
1806 u16 device
= ((struct u132_platform_data
*)
1807 dev_get_platdata(&pdev
->dev
))->device
;
1808 mutex_lock(&u132
->sw_lock
);
1810 if (vendor
== PCI_VENDOR_ID_AMD
&& device
== 0x740c) {
1811 u132
->flags
= OHCI_QUIRK_AMD756
;
1812 } else if (vendor
== PCI_VENDOR_ID_OPTI
&& device
== 0xc861) {
1813 dev_err(&u132
->platform_dev
->dev
, "WARNING: OPTi workar"
1814 "ounds unavailable\n");
1815 } else if (vendor
== PCI_VENDOR_ID_COMPAQ
&& device
== 0xa0f8)
1816 u132
->flags
|= OHCI_QUIRK_ZFMICRO
;
1817 retval
= u132_run(u132
);
1823 mutex_unlock(&u132
->sw_lock
);
1826 dev_err(&u132
->platform_dev
->dev
, "platform_device missing\n");
1831 static int u132_hcd_reset(struct usb_hcd
*hcd
)
1833 struct u132
*u132
= hcd_to_u132(hcd
);
1834 if (u132
->going
> 1) {
1835 dev_err(&u132
->platform_dev
->dev
, "device has been removed %d\n"
1838 } else if (u132
->going
> 0) {
1839 dev_err(&u132
->platform_dev
->dev
, "device is being removed\n");
1843 mutex_lock(&u132
->sw_lock
);
1844 retval
= u132_init(u132
);
1849 mutex_unlock(&u132
->sw_lock
);
1854 static int create_endpoint_and_queue_int(struct u132
*u132
,
1855 struct u132_udev
*udev
, struct urb
*urb
,
1856 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
, u8 address
,
1859 struct u132_ring
*ring
;
1863 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
1868 spin_lock_init(&endp
->queue_lock
.slock
);
1869 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
1870 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
1872 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1877 endp_number
= ++u132
->num_endpoints
;
1878 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
1879 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
1880 INIT_LIST_HEAD(&endp
->urb_more
);
1881 ring
= endp
->ring
= &u132
->ring
[0];
1882 if (ring
->curr_endp
) {
1883 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
1885 INIT_LIST_HEAD(&endp
->endp_ring
);
1886 ring
->curr_endp
= endp
;
1889 endp
->dequeueing
= 0;
1890 endp
->edset_flush
= 0;
1893 endp
->endp_number
= endp_number
;
1895 endp
->hep
= urb
->ep
;
1896 endp
->pipetype
= usb_pipetype(urb
->pipe
);
1897 u132_endp_init_kref(u132
, endp
);
1898 if (usb_pipein(urb
->pipe
)) {
1899 endp
->toggle_bits
= 0x2;
1900 usb_settoggle(udev
->usb_device
, usb_endp
, 0, 0);
1903 udev
->endp_number_in
[usb_endp
] = endp_number
;
1904 u132_udev_get_kref(u132
, udev
);
1906 endp
->toggle_bits
= 0x2;
1907 usb_settoggle(udev
->usb_device
, usb_endp
, 1, 0);
1910 udev
->endp_number_out
[usb_endp
] = endp_number
;
1911 u132_udev_get_kref(u132
, udev
);
1915 endp
->jiffies
= jiffies
+ msecs_to_jiffies(urb
->interval
);
1916 endp
->udev_number
= address
;
1917 endp
->usb_addr
= usb_addr
;
1918 endp
->usb_endp
= usb_endp
;
1919 endp
->queue_size
= 1;
1920 endp
->queue_last
= 0;
1921 endp
->queue_next
= 0;
1922 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
1923 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1924 u132_endp_queue_work(u132
, endp
, msecs_to_jiffies(urb
->interval
));
1928 static int queue_int_on_old_endpoint(struct u132
*u132
,
1929 struct u132_udev
*udev
, struct urb
*urb
,
1930 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
1931 u8 usb_endp
, u8 address
)
1935 endp
->jiffies
= jiffies
+ msecs_to_jiffies(urb
->interval
);
1936 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
1937 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
1939 struct u132_urbq
*urbq
= kmalloc(sizeof(struct u132_urbq
),
1942 endp
->queue_size
-= 1;
1945 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
1952 static int create_endpoint_and_queue_bulk(struct u132
*u132
,
1953 struct u132_udev
*udev
, struct urb
*urb
,
1954 struct usb_device
*usb_dev
, u8 usb_addr
, u8 usb_endp
, u8 address
,
1958 struct u132_ring
*ring
;
1962 struct u132_endp
*endp
= kmalloc(sizeof(struct u132_endp
), mem_flags
);
1967 spin_lock_init(&endp
->queue_lock
.slock
);
1968 spin_lock_irqsave(&endp
->queue_lock
.slock
, irqs
);
1969 rc
= usb_hcd_link_urb_to_ep(u132_to_hcd(u132
), urb
);
1971 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
1976 endp_number
= ++u132
->num_endpoints
;
1977 urb
->ep
->hcpriv
= u132
->endp
[endp_number
- 1] = endp
;
1978 INIT_DELAYED_WORK(&endp
->scheduler
, u132_hcd_endp_work_scheduler
);
1979 INIT_LIST_HEAD(&endp
->urb_more
);
1980 endp
->dequeueing
= 0;
1981 endp
->edset_flush
= 0;
1984 endp
->endp_number
= endp_number
;
1986 endp
->hep
= urb
->ep
;
1987 endp
->pipetype
= usb_pipetype(urb
->pipe
);
1988 u132_endp_init_kref(u132
, endp
);
1989 if (usb_pipein(urb
->pipe
)) {
1990 endp
->toggle_bits
= 0x2;
1991 usb_settoggle(udev
->usb_device
, usb_endp
, 0, 0);
1995 udev
->endp_number_in
[usb_endp
] = endp_number
;
1996 u132_udev_get_kref(u132
, udev
);
1998 endp
->toggle_bits
= 0x2;
1999 usb_settoggle(udev
->usb_device
, usb_endp
, 1, 0);
2003 udev
->endp_number_out
[usb_endp
] = endp_number
;
2004 u132_udev_get_kref(u132
, udev
);
2006 ring
= endp
->ring
= &u132
->ring
[ring_number
- 1];
2007 if (ring
->curr_endp
) {
2008 list_add_tail(&endp
->endp_ring
, &ring
->curr_endp
->endp_ring
);
2010 INIT_LIST_HEAD(&endp
->endp_ring
);
2011 ring
->curr_endp
= endp
;
2015 endp
->udev_number
= address
;
2016 endp
->usb_addr
= usb_addr
;
2017 endp
->usb_endp
= usb_endp
;
2018 endp
->queue_size
= 1;
2019 endp
->queue_last
= 0;
2020 endp
->queue_next
= 0;
2021 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2022 spin_unlock_irqrestore(&endp
->queue_lock
.slock
, irqs
);
2023 u132_endp_queue_work(u132
, endp
, 0);
2027 static int queue_bulk_on_old_endpoint(struct u132
*u132
, struct u132_udev
*udev
,
2029 struct usb_device
*usb_dev
, struct u132_endp
*endp
, u8 usb_addr
,
2030 u8 usb_endp
, u8 address
)
2033 if (endp
->queue_size
++ < ENDP_QUEUE_SIZE
) {
2034 endp
->urb_list
[ENDP_QUEUE_MASK
& endp
->queue_last
++] = urb
;
2036 struct u132_urbq
*urbq
= kmalloc(sizeof(struct u132_urbq
),
2039 endp
->queue_size
-= 1;
2042 list_add_tail(&urbq
->urb_more
, &endp
->urb_more
);
static int create_endpoint_and_queue_control(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, u8 usb_addr, u8 usb_endp,
	gfp_t mem_flags)
{
	struct u132_ring *ring;
	unsigned long irqs;
	int rc;
	u8 endp_number;
	struct u132_endp *endp = kmalloc(sizeof(struct u132_endp), mem_flags);

	if (!endp)
		return -ENOMEM;

	spin_lock_init(&endp->queue_lock.slock);
	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_link_urb_to_ep(u132_to_hcd(u132), urb);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		kfree(endp);
		return rc;
	}

	endp_number = ++u132->num_endpoints;
	urb->ep->hcpriv = u132->endp[endp_number - 1] = endp;
	INIT_DELAYED_WORK(&endp->scheduler, u132_hcd_endp_work_scheduler);
	INIT_LIST_HEAD(&endp->urb_more);
	ring = endp->ring = &u132->ring[0];
	if (ring->curr_endp) {
		list_add_tail(&endp->endp_ring, &ring->curr_endp->endp_ring);
	} else {
		INIT_LIST_HEAD(&endp->endp_ring);
		ring->curr_endp = endp;
	}
	ring->length += 1;
	endp->dequeueing = 0;
	endp->edset_flush = 0;
	endp->active = 0;
	endp->delayed = 0;
	endp->endp_number = endp_number;
	endp->u132 = u132;
	endp->hep = urb->ep;
	u132_endp_init_kref(u132, endp);
	u132_endp_get_kref(u132, endp);
	if (usb_addr == 0) {
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_init_kref(u132, udev);
		u132_udev_get_kref(u132, udev);
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		urb->hcpriv = u132;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	} else {		/*(usb_addr > 0) */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		endp->udev_number = address;
		endp->usb_addr = usb_addr;
		endp->usb_endp = usb_endp;
		endp->input = 1;
		endp->output = 1;
		endp->pipetype = usb_pipetype(urb->pipe);
		u132_udev_get_kref(u132, udev);
		udev->enumeration = 2;
		udev->endp_number_in[usb_endp] = endp_number;
		udev->endp_number_out[usb_endp] = endp_number;
		urb->hcpriv = u132;
		endp->queue_size = 1;
		endp->queue_last = 0;
		endp->queue_next = 0;
		endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] = urb;
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		u132_endp_queue_work(u132, endp, 0);
		return 0;
	}
}
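
/*
 * Queue a control URB on an endpoint that already exists. For the default
 * address (usb_addr == 0) an outgoing request is intercepted: a free
 * u132_udev slot is claimed and the address byte of the setup packet is
 * rewritten to the slot number, which is how the driver appears to map USB
 * device numbers onto its fixed udev table.
 */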
static int queue_control_on_old_endpoint(struct u132 *u132,
	struct urb *urb,
	struct usb_device *usb_dev, struct u132_endp *endp, u8 usb_addr,
	u8 usb_endp)
{
	if (usb_addr == 0) {
		if (usb_pipein(urb->pipe)) {
			urb->hcpriv = u132;
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				} else {
					list_add_tail(&urbq->urb_more,
						&endp->urb_more);
					urbq->urb = urb;
				}
			}
			return 0;
		} else {	/* usb_pipeout(urb->pipe) */
			struct u132_addr *addr = &u132->addr[usb_dev->devnum];
			int I = MAX_U132_UDEVS;
			int i = 0;
			while (--I > 0) {
				struct u132_udev *udev = &u132->udev[++i];
				if (udev->usb_device) {
					continue;
				} else {
					udev->enumeration = 1;
					u132->addr[0].address = i;
					endp->udev_number = i;
					udev->udev_number = i;
					udev->usb_addr = usb_dev->devnum;
					u132_udev_init_kref(u132, udev);
					udev->endp_number_in[usb_endp] =
						endp->endp_number;
					u132_udev_get_kref(u132, udev);
					udev->endp_number_out[usb_endp] =
						endp->endp_number;
					udev->usb_device = usb_dev;
					((u8 *) (urb->setup_packet))[2] =
						addr->address = i;
					u132_udev_get_kref(u132, udev);
					break;
				}
			}
			if (I == 0) {
				dev_err(&u132->platform_dev->dev, "run out of d"
					"evice space\n");
				return -EINVAL;
			}
			urb->hcpriv = u132;
			if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
				endp->urb_list[ENDP_QUEUE_MASK &
					endp->queue_last++] = urb;
			} else {
				struct u132_urbq *urbq =
					kmalloc(sizeof(struct u132_urbq),
					GFP_ATOMIC);
				if (urbq == NULL) {
					endp->queue_size -= 1;
					return -ENOMEM;
				} else {
					list_add_tail(&urbq->urb_more,
						&endp->urb_more);
					urbq->urb = urb;
				}
			}
			return 0;
		}
	} else {		/*(usb_addr > 0) */
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		urb->hcpriv = u132;
		if (udev->enumeration != 2)
			udev->enumeration = 2;
		if (endp->queue_size++ < ENDP_QUEUE_SIZE) {
			endp->urb_list[ENDP_QUEUE_MASK & endp->queue_last++] =
				urb;
		} else {
			struct u132_urbq *urbq =
				kmalloc(sizeof(struct u132_urbq), GFP_ATOMIC);
			if (urbq == NULL) {
				endp->queue_size -= 1;
				return -ENOMEM;
			} else {
				list_add_tail(&urbq->urb_more, &endp->urb_more);
				urbq->urb = urb;
			}
		}
		return 0;
	}
}
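
/*
 * hc_driver.urb_enqueue entry point: dispatch on the pipe type. Interrupt
 * and bulk URBs either join an existing endpoint queue or create a new
 * endpoint; isochronous transfers are rejected because the hardware does not
 * support them; everything else is treated as a control transfer.
 */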
static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
	gfp_t mem_flags)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (irqs_disabled()) {
		if (gfpflags_allow_blocking(mem_flags)) {
			printk(KERN_ERR "invalid context for function that might sleep\n");
			return -EINVAL;
		}
	}
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed "
			"urb=%p\n", urb);
		return -ESHUTDOWN;
	} else {
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		struct usb_device *usb_dev = urb->dev;
		if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_int_on_old_endpoint(
						u132, udev, urb, usb_dev, endp,
						usb_addr, usb_endp, address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp,
						msecs_to_jiffies(urb->interval));
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else {	/*(endp == NULL) */
				return create_endpoint_and_queue_int(u132, udev,
					urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
			}
		} else if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			dev_err(&u132->platform_dev->dev, "the hardware does no"
				"t support PIPE_ISOCHRONOUS\n");
			return -EINVAL;
		} else if (usb_pipetype(urb->pipe) == PIPE_BULK) {
			u8 address = u132->addr[usb_addr].address;
			struct u132_udev *udev = &u132->udev[address];
			struct u132_endp *endp = urb->ep->hcpriv;
			urb->actual_length = 0;
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_bulk_on_old_endpoint(
						u132, udev, urb, usb_dev, endp,
						usb_addr, usb_endp, address);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp, 0);
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else
				return create_endpoint_and_queue_bulk(u132,
					udev, urb, usb_dev, usb_addr,
					usb_endp, address, mem_flags);
		} else {
			struct u132_endp *endp = urb->ep->hcpriv;
			u16 urb_size = 8;
			u8 *b = urb->setup_packet;
			int i = 0;
			char data[30 * 3 + 4];
			char *d = data;
			int m = (sizeof(data) - 1) / 3;
			int l = 0;
			data[0] = 0;
			while (urb_size-- > 0) {
				if (i > m) {
				} else if (i++ < m) {
					int w = sprintf(d, " %02X", *b++);
					d += w;
					l += w;
				} else
					d += sprintf(d, " ..");
			}
			if (endp) {
				unsigned long irqs;
				int retval;
				spin_lock_irqsave(&endp->queue_lock.slock,
					irqs);
				retval = usb_hcd_link_urb_to_ep(hcd, urb);
				if (retval == 0) {
					retval = queue_control_on_old_endpoint(
						u132, urb, usb_dev,
						endp, usb_addr, usb_endp);
					if (retval)
						usb_hcd_unlink_urb_from_ep(
							hcd, urb);
				}
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				if (retval) {
					return retval;
				} else {
					u132_endp_queue_work(u132, endp, 0);
					return 0;
				}
			} else if (u132->num_endpoints == MAX_U132_ENDPS) {
				return -EINVAL;
			} else
				return create_endpoint_and_queue_control(u132,
					urb, usb_dev, usb_addr, usb_endp,
					mem_flags);
		}
	}
}
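
/*
 * Remove a URB that never made it into the circular urb_list and is still
 * sitting on the endpoint's urb_more overflow chain; the URB is given back
 * to the USB core with a zero status.
 */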
static int dequeue_from_overflow_chain(struct u132 *u132,
	struct u132_endp *endp, struct urb *urb)
{
	struct u132_urbq *urbq;

	list_for_each_entry(urbq, &endp->urb_more, urb_more) {
		if (urbq->urb == urb) {
			struct usb_hcd *hcd = u132_to_hcd(u132);
			list_del(&urbq->urb_more);
			endp->queue_size -= 1;
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, 0);
			return 0;
		}
	}
	dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]=%p ring"
		"[%d] %c%c usb_endp=%d usb_addr=%d size=%d next=%04X last=%04X"
		"\n", urb, endp->endp_number, endp, endp->ring->number,
		endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
		endp->usb_endp, endp->usb_addr, endp->queue_size,
		endp->queue_next, endp->queue_last);
	return -EINVAL;
}
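
/*
 * Dequeue a URB from a specific endpoint. If it is the URB at the head of
 * the queue it is either flushed through the endpoint worker or abandoned
 * directly; otherwise the circular urb_list is compacted in place (later
 * entries are shuffled down over the removed slot) before the URB is given
 * back to the USB core.
 */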
static int u132_endp_urb_dequeue(struct u132 *u132, struct u132_endp *endp,
	struct urb *urb, int status)
{
	unsigned long irqs;
	int rc;

	spin_lock_irqsave(&endp->queue_lock.slock, irqs);
	rc = usb_hcd_check_unlink_urb(u132_to_hcd(u132), urb, status);
	if (rc) {
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return rc;
	}
	if (endp->queue_size == 0) {
		dev_err(&u132->platform_dev->dev, "urb=%p not found in endp[%d]"
			"=%p ring[%d] %c%c usb_endp=%d usb_addr=%d\n", urb,
			endp->endp_number, endp, endp->ring->number,
			endp->input ? 'I' : ' ', endp->output ? 'O' : ' ',
			endp->usb_endp, endp->usb_addr);
		spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
		return -EINVAL;
	}
	if (urb == endp->urb_list[ENDP_QUEUE_MASK & endp->queue_next]) {
		if (endp->active) {
			endp->dequeueing = 1;
			endp->edset_flush = 1;
			u132_endp_queue_work(u132, endp, 0);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return 0;
		} else {
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			u132_hcd_abandon_urb(u132, endp, urb, status);
			return 0;
		}
	} else {
		u16 queue_list = 0;
		u16 queue_size = endp->queue_size;
		u16 queue_scan = endp->queue_next;
		struct urb **urb_slot = NULL;
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			if (urb == endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan]) {
				urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
					queue_scan];
				break;
			}
		}
		while (++queue_list < ENDP_QUEUE_SIZE && --queue_size > 0) {
			*urb_slot = endp->urb_list[ENDP_QUEUE_MASK &
				++queue_scan];
			urb_slot = &endp->urb_list[ENDP_QUEUE_MASK &
				queue_scan];
		}
		if (urb_slot) {
			struct usb_hcd *hcd = u132_to_hcd(u132);

			usb_hcd_unlink_urb_from_ep(hcd, urb);
			endp->queue_size -= 1;
			if (list_empty(&endp->urb_more)) {
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
			} else {
				struct list_head *next = endp->urb_more.next;
				struct u132_urbq *urbq = list_entry(next,
					struct u132_urbq, urb_more);
				list_del(next);
				*urb_slot = urbq->urb;
				spin_unlock_irqrestore(&endp->queue_lock.slock,
					irqs);
				kfree(urbq);
			}
			urb->error_count = 0;
			usb_hcd_giveback_urb(hcd, urb, status);
			return 0;
		} else if (list_empty(&endp->urb_more)) {
			dev_err(&u132->platform_dev->dev, "urb=%p not found in "
				"endp[%d]=%p ring[%d] %c%c usb_endp=%d usb_addr"
				"=%d size=%d next=%04X last=%04X\n", urb,
				endp->endp_number, endp, endp->ring->number,
				endp->input ? 'I' : ' ',
				endp->output ? 'O' : ' ', endp->usb_endp,
				endp->usb_addr, endp->queue_size,
				endp->queue_next, endp->queue_last);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return -EINVAL;
		} else {
			int retval;

			usb_hcd_unlink_urb_from_ep(u132_to_hcd(u132), urb);
			retval = dequeue_from_overflow_chain(u132, endp, urb);
			spin_unlock_irqrestore(&endp->queue_lock.slock, irqs);
			return retval;
		}
	}
}
static int u132_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else {
		u8 usb_addr = usb_pipedevice(urb->pipe);
		u8 usb_endp = usb_pipeendpoint(urb->pipe);
		u8 address = u132->addr[usb_addr].address;
		struct u132_udev *udev = &u132->udev[address];
		if (usb_pipein(urb->pipe)) {
			u8 endp_number = udev->endp_number_in[usb_endp];
			struct u132_endp *endp = u132->endp[endp_number - 1];
			return u132_endp_urb_dequeue(u132, endp, urb, status);
		} else {
			u8 endp_number = udev->endp_number_out[usb_endp];
			struct u132_endp *endp = u132->endp[endp_number - 1];
			return u132_endp_urb_dequeue(u132, endp, urb, status);
		}
	}
}
static void u132_endpoint_disable(struct usb_hcd *hcd,
	struct usb_host_endpoint *hep)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 2) {
		dev_err(&u132->platform_dev->dev, "u132 device %p(hcd=%p hep=%p"
			") has been removed %d\n", u132, hcd, hep,
			u132->going);
	} else {
		struct u132_endp *endp = hep->hcpriv;
		if (endp)
			u132_endp_put_kref(u132, endp);
	}
}
static int u132_get_frame(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int frame = 0;
		dev_err(&u132->platform_dev->dev, "TODO: u132_get_frame\n");
		mdelay(100);
		return frame;
	}
}
static int u132_roothub_descriptor(struct u132 *u132,
	struct usb_hub_descriptor *desc)
{
	int retval;
	u16 temp;
	u32 rh_a = -1;
	u32 rh_b = -1;

	retval = u132_read_pcimem(u132, roothub.a, &rh_a);
	if (retval)
		return retval;
	desc->bDescriptorType = USB_DT_HUB;
	desc->bPwrOn2PwrGood = (rh_a & RH_A_POTPGT) >> 24;
	desc->bHubContrCurrent = 0;
	desc->bNbrPorts = u132->num_ports;
	temp = 1 + (u132->num_ports / 8);
	desc->bDescLength = 7 + 2 * temp;
	temp = HUB_CHAR_COMMON_LPSM | HUB_CHAR_COMMON_OCPM;
	if (rh_a & RH_A_NPS)
		temp |= HUB_CHAR_NO_LPSM;
	if (rh_a & RH_A_PSM)
		temp |= HUB_CHAR_INDV_PORT_LPSM;
	if (rh_a & RH_A_NOCP)
		temp |= HUB_CHAR_NO_OCPM;
	else if (rh_a & RH_A_OCPM)
		temp |= HUB_CHAR_INDV_PORT_OCPM;
	desc->wHubCharacteristics = cpu_to_le16(temp);
	retval = u132_read_pcimem(u132, roothub.b, &rh_b);
	if (retval)
		return retval;
	memset(desc->u.hs.DeviceRemovable, 0xff,
		sizeof(desc->u.hs.DeviceRemovable));
	desc->u.hs.DeviceRemovable[0] = rh_b & RH_B_DR;
	if (u132->num_ports > 7) {
		desc->u.hs.DeviceRemovable[1] = (rh_b & RH_B_DR) >> 8;
		desc->u.hs.DeviceRemovable[2] = 0xff;
	} else
		desc->u.hs.DeviceRemovable[1] = 0xff;
	return 0;
}
static int u132_roothub_status(struct u132 *u132, __le32 *desc)
{
	u32 rh_status = -1;
	int ret_status = u132_read_pcimem(u132, roothub.status, &rh_status);
	*desc = cpu_to_le32(rh_status);
	return ret_status;
}
static int u132_roothub_portstatus(struct u132 *u132, __le32 *desc, u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port = wIndex - 1;
		u32 rh_portstatus = -1;
		int ret_portstatus = u132_read_pcimem(u132,
			roothub.portstatus[port], &rh_portstatus);
		*desc = cpu_to_le32(rh_portstatus);
		if (*(u16 *) (desc + 2)) {
			dev_info(&u132->platform_dev->dev, "Port %d Status Chan"
				"ge = %08X\n", port, *desc);
		}
		return ret_portstatus;
	}
}
/* this timer value might be vendor-specific ... */
#define PORT_RESET_HW_MSEC 10
#define PORT_RESET_MSEC 10
/* wrap-aware logic morphed from <linux/jiffies.h> */
#define tick_before(t1, t2) ((s16)(((s16)(t1))-((s16)(t2))) < 0)
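
/*
 * Port reset follows the usual OHCI root hub reset dance: poll RH_PS_PRS
 * until any previous reset has finished, acknowledge RH_PS_PRSC, start a new
 * reset and sleep for PORT_RESET_HW_MSEC, all bounded by reset_done using the
 * wrap-aware tick_before() comparison on the frame number.
 */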
static int u132_roothub_portreset(struct u132 *u132, int port_index)
{
	int retval;
	u32 fmnumber;
	u16 now;
	u16 reset_done;

	retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
	if (retval)
		return retval;
	now = fmnumber;
	reset_done = now + PORT_RESET_MSEC;
	do {
		u32 portstat;
		do {
			retval = u132_read_pcimem(u132,
				roothub.portstatus[port_index], &portstat);
			if (retval)
				return retval;
			if (RH_PS_PRS & portstat)
				continue;
			else
				break;
		} while (tick_before(now, reset_done));
		if (RH_PS_PRS & portstat)
			return -ENODEV;
		if (RH_PS_CCS & portstat) {
			if (RH_PS_PRSC & portstat) {
				retval = u132_write_pcimem(u132,
					roothub.portstatus[port_index],
					RH_PS_PRSC);
				if (retval)
					return retval;
			}
		} else
			break;	/* start the next reset,
				sleep till it's probably done */
		retval = u132_write_pcimem(u132, roothub.portstatus[port_index],
			RH_PS_PRS);
		if (retval)
			return retval;
		msleep(PORT_RESET_HW_MSEC);
		retval = u132_read_pcimem(u132, fmnumber, &fmnumber);
		if (retval)
			return retval;
		now = fmnumber;
	} while (tick_before(now, reset_done));
	return 0;
}
static int u132_roothub_setportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port_index = wIndex - 1;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			return u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PSS);
		case USB_PORT_FEAT_POWER:
			return u132_write_pcimem(u132,
				roothub.portstatus[port_index], RH_PS_PPS);
		case USB_PORT_FEAT_RESET:
			return u132_roothub_portreset(u132, port_index);
		default:
			return -EPIPE;
		}
	}
}
static int u132_roothub_clearportfeature(struct u132 *u132, u16 wValue,
	u16 wIndex)
{
	if (wIndex == 0 || wIndex > u132->num_ports) {
		return -EINVAL;
	} else {
		int port_index = wIndex - 1;
		u32 temp;
		struct u132_port *port = &u132->port[port_index];
		port->Status &= ~(1 << wValue);
		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			temp = RH_PS_CCS;
			break;
		case USB_PORT_FEAT_C_ENABLE:
			temp = RH_PS_PESC;
			break;
		case USB_PORT_FEAT_SUSPEND:
			temp = RH_PS_POCI;
			if ((u132->hc_control & OHCI_CTRL_HCFS)
				!= OHCI_USB_OPER) {
				dev_err(&u132->platform_dev->dev, "TODO resume_"
					"root_hub\n");
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			temp = RH_PS_PSSC;
			break;
		case USB_PORT_FEAT_POWER:
			temp = RH_PS_LSDA;
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			temp = RH_PS_CSC;
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			temp = RH_PS_OCIC;
			break;
		case USB_PORT_FEAT_C_RESET:
			temp = RH_PS_PRSC;
			break;
		default:
			return -EPIPE;
		}
		return u132_write_pcimem(u132, roothub.portstatus[port_index],
			temp);
	}
}
/* the virtual root hub timer IRQ checks for hub status */
static int u132_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p has been remov"
			"ed %d\n", hcd, u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device hcd=%p is being remov"
			"ed\n", hcd);
		return -ESHUTDOWN;
	} else {
		int i, changed = 0, length = 1;
		if (u132->flags & OHCI_QUIRK_AMD756) {
			if ((u132->hc_roothub_a & RH_A_NDP) > MAX_ROOT_PORTS) {
				dev_err(&u132->platform_dev->dev, "bogus NDP, r"
					"ereads as NDP=%d\n",
					u132->hc_roothub_a & RH_A_NDP);
				return 0;
			}
		}
		if (u132->hc_roothub_status & (RH_HS_LPSC | RH_HS_OCIC))
			buf[0] = changed = 1;
		else
			buf[0] = 0;
		if (u132->num_ports > 7) {
			buf[1] = 0;
			length++;
		}
		for (i = 0; i < u132->num_ports; i++) {
			if (u132->hc_roothub_portstatus[i] & (RH_PS_CSC |
				RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC |
				RH_PS_PRSC)) {
				changed = 1;
				if (i < 7)
					buf[0] |= 1 << (i + 1);
				else
					buf[1] |= 1 << (i - 7);
				continue;
			}
			if (!(u132->hc_roothub_portstatus[i] & RH_PS_CCS))
				continue;

			if ((u132->hc_roothub_portstatus[i] & RH_PS_PSS))
				continue;
		}
		return changed ? length : 0;
	}
}
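
/*
 * Root hub control requests are decoded here under sw_lock and fanned out to
 * the u132_roothub_* helpers above; requests the virtual hub cannot handle
 * are stalled with -EPIPE.
 */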
static int u132_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
	u16 wIndex, char *buf, u16 wLength)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0;
		mutex_lock(&u132->sw_lock);
		switch (typeReq) {
		case ClearHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;
			default:
				goto stall;
			}
			break;
		case SetHubFeature:
			switch (wValue) {
			case C_HUB_OVER_CURRENT:
			case C_HUB_LOCAL_POWER:
				break;
			default:
				goto stall;
			}
			break;
		case ClearPortFeature:{
				retval = u132_roothub_clearportfeature(u132,
					wValue, wIndex);
				if (retval)
					goto error;
				break;
			}
		case GetHubDescriptor:{
				retval = u132_roothub_descriptor(u132,
					(struct usb_hub_descriptor *)buf);
				if (retval)
					goto error;
				break;
			}
		case GetHubStatus:{
				retval = u132_roothub_status(u132,
					(__le32 *) buf);
				if (retval)
					goto error;
				break;
			}
		case GetPortStatus:{
				retval = u132_roothub_portstatus(u132,
					(__le32 *) buf, wIndex);
				if (retval)
					goto error;
				break;
			}
		case SetPortFeature:{
				retval = u132_roothub_setportfeature(u132,
					wValue, wIndex);
				if (retval)
					goto error;
				break;
			}
		default:
			goto stall;
		error:
			u132_disable(u132);
			u132->going = 1;
			break;
		stall:
			retval = -EPIPE;
			break;
		}
		mutex_unlock(&u132->sw_lock);
		return retval;
	}
}
static int u132_start_port_reset(struct usb_hcd *hcd, unsigned port_num)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}
#ifdef CONFIG_PM
static int u132_bus_suspend(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}

static int u132_bus_resume(struct usb_hcd *hcd)
{
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else
		return 0;
}

#else
#define u132_bus_suspend NULL
#define u132_bus_resume NULL
#endif
static struct hc_driver u132_hc_driver = {
	.description = hcd_name,
	.hcd_priv_size = sizeof(struct u132),
	.flags = HCD_USB11 | HCD_MEMORY,
	.reset = u132_hcd_reset,
	.start = u132_hcd_start,
	.stop = u132_hcd_stop,
	.urb_enqueue = u132_urb_enqueue,
	.urb_dequeue = u132_urb_dequeue,
	.endpoint_disable = u132_endpoint_disable,
	.get_frame_number = u132_get_frame,
	.hub_status_data = u132_hub_status_data,
	.hub_control = u132_hub_control,
	.bus_suspend = u132_bus_suspend,
	.bus_resume = u132_bus_resume,
	.start_port_reset = u132_start_port_reset,
};
/*
 * This function may be called by the USB core whilst the "usb_all_devices_rwsem"
 * is held for writing, thus this module must not call usb_remove_hcd()
 * synchronously - but instead should immediately stop activity to the
 * device and asynchronously call usb_remove_hcd()
 */
static int u132_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	if (hcd) {
		struct u132 *u132 = hcd_to_u132(hcd);
		if (u132->going++ > 1) {
			dev_err(&u132->platform_dev->dev, "already being remove"
				"d\n");
			return -ENODEV;
		} else {
			int rings = MAX_U132_RINGS;
			int endps = MAX_U132_ENDPS;
			dev_err(&u132->platform_dev->dev, "removing device u132"
				".%d\n", u132->sequence_num);
			msleep(100);
			mutex_lock(&u132->sw_lock);
			u132_monitor_cancel_work(u132);
			while (rings-- > 0) {
				struct u132_ring *ring = &u132->ring[rings];
				u132_ring_cancel_work(u132, ring);
			} while (endps-- > 0) {
				struct u132_endp *endp = u132->endp[endps];
				if (endp)
					u132_endp_cancel_work(u132, endp);
			}
			u132->going += 1;
			printk(KERN_INFO "removing device u132.%d\n",
				u132->sequence_num);
			mutex_unlock(&u132->sw_lock);
			usb_remove_hcd(hcd);
			u132_u132_put_kref(u132);
			return 0;
		}
	} else
		return 0;
}
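
/*
 * Reset every ring, port, address, udev and endpoint slot to a known empty
 * state before the HCD is registered; this also initialises the two driver
 * mutexes and the monitor delayed work.
 */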
static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
{
	int rings = MAX_U132_RINGS;
	int ports = MAX_U132_PORTS;
	int addrs = MAX_U132_ADDRS;
	int udevs = MAX_U132_UDEVS;
	int endps = MAX_U132_ENDPS;
	u132->board = dev_get_platdata(&pdev->dev);
	u132->platform_dev = pdev;
	mutex_init(&u132->sw_lock);
	mutex_init(&u132->scheduler_lock);
	while (rings-- > 0) {
		struct u132_ring *ring = &u132->ring[rings];
		ring->u132 = u132;
		ring->number = rings + 1;
		ring->length = 0;
		ring->curr_endp = NULL;
		INIT_DELAYED_WORK(&ring->scheduler,
			u132_hcd_ring_work_scheduler);
	}
	mutex_lock(&u132->sw_lock);
	INIT_DELAYED_WORK(&u132->monitor, u132_hcd_monitor_work);
	while (ports-- > 0) {
		struct u132_port *port = &u132->port[ports];
		port->u132 = u132;
		port->reset = 0;
		port->enable = 0;
		port->power = 0;
		port->Status = 0;
	}
	while (addrs-- > 0) {
		struct u132_addr *addr = &u132->addr[addrs];
		addr->address = 0;
	}
	while (udevs-- > 0) {
		struct u132_udev *udev = &u132->udev[udevs];
		int i = ARRAY_SIZE(udev->endp_number_in);
		int o = ARRAY_SIZE(udev->endp_number_out);
		udev->usb_device = NULL;
		udev->udev_number = 0;
		udev->usb_addr = 0;
		udev->portnumber = 0;
		while (i-- > 0)
			udev->endp_number_in[i] = 0;
		while (o-- > 0)
			udev->endp_number_out[o] = 0;
	}
	while (endps-- > 0)
		u132->endp[endps] = NULL;
	mutex_unlock(&u132->sw_lock);
}
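
/*
 * Probe runs once the ftdi-elan module has found a U132 cartridge. The OHCI
 * registers are only reachable through the ftdi_*_pcimem() accessors, so
 * interrupts are masked first, the port count is read from roothub.a, and
 * then a normal usb_hcd is created and registered on top of the platform
 * device.
 */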
static int u132_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	int retval;
	u32 control;
	u32 rh_a = -1;
	u32 num_ports;

	msleep(100);
	if (u132_exiting > 0)
		return -ENODEV;

	retval = ftdi_write_pcimem(pdev, intrdisable, OHCI_INTR_MIE);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, control, &control);
	if (retval)
		return retval;
	retval = ftdi_read_pcimem(pdev, roothub.a, &rh_a);
	if (retval)
		return retval;
	num_ports = rh_a & RH_A_NDP;	/* refuse to confuse usbcore */
	if (pdev->dev.dma_mask)
		return -EINVAL;

	hcd = usb_create_hcd(&u132_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		printk(KERN_ERR "failed to create the usb hcd struct for U132\n");
		ftdi_elan_gone_away(pdev);
		return -ENOMEM;
	} else {
		struct u132 *u132 = hcd_to_u132(hcd);
		retval = 0;
		hcd->rsrc_start = 0;
		mutex_lock(&u132_module_lock);
		list_add_tail(&u132->u132_list, &u132_static_list);
		u132->sequence_num = ++u132_instances;
		mutex_unlock(&u132_module_lock);
		u132_u132_init_kref(u132);
		u132_initialise(u132, pdev);
		hcd->product_desc = "ELAN U132 Host Controller";
		retval = usb_add_hcd(hcd, 0, 0);
		if (retval != 0) {
			dev_err(&u132->platform_dev->dev, "init error %d\n",
				retval);
			u132_u132_put_kref(u132);
			return retval;
		} else {
			device_wakeup_enable(hcd->self.controller);
			u132_monitor_queue_work(u132, 100);
			return 0;
		}
	}
}
#ifdef CONFIG_PM
/*
 * for this device there's no useful distinction between the controller
 * and its root hub.
 */
static int u132_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval = 0, ports;

		switch (state.event) {
		case PM_EVENT_FREEZE:
			retval = u132_bus_suspend(hcd);
			break;
		case PM_EVENT_SUSPEND:
		case PM_EVENT_HIBERNATE:
			ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 0);
			}
			break;
		}
		return retval;
	}
}

static int u132_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct u132 *u132 = hcd_to_u132(hcd);
	if (u132->going > 1) {
		dev_err(&u132->platform_dev->dev, "device has been removed %d\n",
			u132->going);
		return -ENODEV;
	} else if (u132->going > 0) {
		dev_err(&u132->platform_dev->dev, "device is being removed\n");
		return -ESHUTDOWN;
	} else {
		int retval;
		if (!u132->port[0].power) {
			int ports = MAX_U132_PORTS;
			while (ports-- > 0) {
				port_power(u132, ports, 1);
			}
			retval = 0;
		} else {
			retval = u132_bus_resume(hcd);
		}
		return retval;
	}
}

#else
#define u132_suspend NULL
#define u132_resume NULL
#endif
/*
 * this driver is loaded explicitly by ftdi_u132
 *
 * the platform_driver struct is static because it is per type of module
 */
static struct platform_driver u132_platform_driver = {
	.probe = u132_probe,
	.remove = u132_remove,
	.suspend = u132_suspend,
	.resume = u132_resume,
	.driver = {
		.name = hcd_name,
	},
};
static int __init u132_hcd_init(void)
{
	int retval;
	INIT_LIST_HEAD(&u132_static_list);
	u132_instances = 0;
	u132_exiting = 0;
	mutex_init(&u132_module_lock);
	if (usb_disabled())
		return -ENODEV;
	printk(KERN_INFO "driver %s\n", hcd_name);
	workqueue = create_singlethread_workqueue("u132");
	retval = platform_driver_register(&u132_platform_driver);
	return retval;
}

module_init(u132_hcd_init);
static void __exit u132_hcd_exit(void)
{
	struct u132 *u132;
	struct u132 *temp;

	mutex_lock(&u132_module_lock);
	u132_exiting += 1;
	mutex_unlock(&u132_module_lock);
	list_for_each_entry_safe(u132, temp, &u132_static_list, u132_list) {
		platform_device_unregister(u132->platform_dev);
	}
	platform_driver_unregister(&u132_platform_driver);
	printk(KERN_INFO "u132-hcd driver deregistered\n");
	wait_event(u132_hcd_wait, u132_instances == 0);
	flush_workqueue(workqueue);
	destroy_workqueue(workqueue);
}

module_exit(u132_hcd_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:u132_hcd");