/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * How transfers work: get a buffer, break it up into segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, but it is a pain to implement.
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *    FIXME this is missing
 *
 * THIS CODE IS DISGUSTING
 *
 *    Warned you are; it's my second try and I'm still not happy with it.
 *
 * NOTES:
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and it stays there, assigned, until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways it could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means unused) or when a transfer ends. Reset the
 *       timer when a xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()]. (A sketch of this method
 *       follows the includes below.)
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *       when none are found go over the list, check their endpoint
 *       and their activity record (no last-xfer-done-ts in the
 *       last x seconds), and take one over.
 *
 *   However, due to the fact that we have a set of limited
 *   resources (max-segments-at-the-same-time per xfer,
 *   xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), in the end
 *   we are going to have to rebuild all this based on a scheduler,
 *   where we have a list of transactions to do and, based on the
 *   availability of the different required components (blocks,
 *   rpipes, segment slots, etc), we schedule them. Painful.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include "wa-hc.h"
#include "wusbhc.h"
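/*
 * A minimal sketch of recycling method (a) from the header comment:
 * arm a timer when an rpipe goes idle and release it if it is still
 * idle when the timer fires. Illustrative only -- the driver does not
 * implement this; rpipe_idle_timer_cb() and rpipe_in_use() are
 * assumed names, not existing driver symbols.
 */
#if 0	/* not implemented; see note (a) in the header comment */
static void rpipe_idle_timer_cb(unsigned long data)
{
	struct wa_rpipe *rpipe = (struct wa_rpipe *)data;

	/* If no xfer was queued since the timer was armed, release the
	 * rpipe-to-endpoint assignment [as rpipe_ep_disable() does]. */
	if (!rpipe_in_use(rpipe))
		rpipe_ep_disable(rpipe->wa, rpipe->ep);
}
#endif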
/* Segment state; the ordering matters for the '< WA_SEG_PENDING' checks */
enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);
/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb urb;
	struct urb *dto_urb;		/* for data output */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
};
static void wa_seg_init(struct wa_seg *seg)
{
	/* usb_init_urb() repeats a lot of work, so we do it here */
	kref_init(&seg->urb.kref);
}
/*
 * Protected by xfer->lock
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}
/*
 * Destroy a transfer structure
 *
 * Note that the xfer->seg[index] thingies follow the URB life cycle,
 * so we need to put them, not free them.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			/* dto_urb is only allocated for outbound segments */
			if (!xfer->is_inbound)
				usb_put_urb(xfer->seg[cnt]->dto_urb);
			usb_put_urb(&xfer->seg[cnt]->urb);
		}
	}
	kfree(xfer);
}
static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}
/*
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}
/*
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}
/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
					xfer, cnt, seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, seg->index, found_short, seg->result,
				urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
				xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
				xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
				 xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}
/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}
/*
 * Return the xfer's ID.
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}
/*
 * Search for a transfer by ID on the HCD's URB list
 *
 * The xfer ID is a sequential counter (see wa_xfer_id_init()), so we
 * just compare IDs.
 *
 * @returns NULL if not found; a referenced xfer if found (release it
 *          with wa_xfer_put() when done).
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	struct wa_xfer *xfer = NULL;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			xfer = xfer_itr;
			break;
		}
	}
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer;
}
struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;
}
/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])... not really too much of
	 * a problem [rare], but it avoids a lot of bugs. */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)
		/ xfer->seg_size;
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	return result;
}
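/*
 * Worked example of the sizing above (illustrative numbers only, not
 * from any particular device): wBlocks = 6 and bRPipeBlockSize = 10
 * give a raw seg_size of 6 * (1 << 9) = 3072 bytes; with a
 * wMaxPacketSize of 1024, the rounding keeps seg_size at 3072, and a
 * 10000-byte URB then splits into (10000 + 3071) / 3072 = 4 segments.
 */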
/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
	default:
		BUG();
	}
}
/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access the transfer until we are sure it hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is
 * outbound, as in that case wa_seg_dto_cb will do it when the OUT
 * data phase finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kzalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->urb, usb_dev,
				  usb_sndbulkpipe(usb_dev,
						  dto_epd->bEndpointAddress),
				  &seg->xfer_hdr, xfer_hdr_size,
				  wa_seg_cb, seg);
		buf_itr_size = buf_size > xfer->seg_size ?
			xfer->seg_size : buf_size;
		if (xfer->is_inbound == 0 && buf_size > 0) {
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);
			if (xfer->is_dma) {
				seg->dto_urb->transfer_dma =
					xfer->urb->transfer_dma + buf_itr;
				seg->dto_urb->transfer_flags |=
					URB_NO_TRANSFER_DMA_MAP;
			} else
				seg->dto_urb->transfer_buffer =
					xfer->urb->transfer_buffer + buf_itr;
			seg->dto_urb->transfer_buffer_length = buf_itr_size;
		}
		seg->status = WA_SEG_READY;
		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;
	}
	return 0;

error_dto_alloc:
	kfree(xfer->seg[cnt]);
	cnt--;
error_seg_kzalloc:
	/* use the fact that cnt is left at where it failed */
	for (; cnt > 0; cnt--) {
		if (xfer->is_inbound == 0)
			usb_free_urb(xfer->seg[cnt]->dto_urb);
		kfree(xfer->seg[cnt]);
	}
error_segs_kzalloc:
	return result;
}
/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer into a list of segments, each one with
 * a structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function; no
 * need to do two for loops when we could run everything in a
 * single one.
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;
	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
		cpu_to_le32(xfer->seg_size) : cpu_to_le32(transfer_size);
	transfer_size -= xfer->seg_size;
	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
		xfer_hdr->bTransferSegment = cnt;
		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size)
			: cpu_to_le32(transfer_size);
		xfer->seg[cnt]->status = WA_SEG_READY;
		transfer_size -= xfer->seg_size;
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}
/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;
	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
	if (result < 0) {
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	}
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
			       xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(&seg->urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}
/*
 * Execute more queued request segments until the maximum concurrent allowed
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	       && !list_empty(&rpipe->seg_list)) {
		seg = list_entry(rpipe->seg_list.next, struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}
/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0) {
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
			}
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return result;
}
/*
 * Second part of a URB/transfer enqueuement
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *              delayed == 0
 * xfer->urb	filled and refcounted (this is the case when called
 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 *              and when called by wa_urb_enqueue_run(), as we took an
 *              extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If the
 * xfer result never kicks in, the xfer will time out from the USB
 * code and dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0)
		goto error_rpipe_get;
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS)
		goto error_dequeued;

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0)
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
	if (result < 0)
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);
	return;

	/* this is basically wa_xfer_completion() broken up;
	 * wa_xfer_giveback() does a wa_xfer_put() that will call
	 * wa_xfer_destroy() and clean up / undo setup().
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	if (wusb_dev)
		wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	wa_xfer_giveback(xfer);
	return;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
}
/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we just drop the lock once we have the xfer and reacquire it
 * later.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;

	spin_lock_irq(&wa->xfer_list_lock);
	list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
				 list_node) {
		list_del_init(&xfer->list_node);
		spin_unlock_irq(&wa->xfer_list_lock);

		urb = xfer->urb;
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */

		spin_lock_irq(&wa->xfer_list_lock);
	}
	spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call; otherwise we
 * call it directly.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if (urb->transfer_buffer == NULL
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_work);
	} else {
		wa_urb_enqueue_b(xfer);
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
/*
 * Dequeue a URB and make sure the completion handler
 * [wusbhc_giveback_urb()] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that the enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt;
	unsigned rpipe_ready = 0;

	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/* Nothing setup yet; enqueue will see urb->status !=
		 * -EINPROGRESS (set by the hcd layer) and bail out
		 * with error, no need to do completion
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_SUBMITTED:
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DTI_PENDING:
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	return 0;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * codes
 *
 * Positive errno values in the table are internal inconsistencies and
 * should be flagged louder. Negative ones are to be passed up to the
 * user in the normal way.
 *
 * @status: USB WA status code -- the high two bits are stripped here.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s(): BUG? "
			       "Unknown WA transfer status 0x%02x\n",
			       __func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		if (printk_ratelimit())
			printk(KERN_ERR "%s(): BUG? "
			       "Inconsistent WA status: 0x%02x\n",
			       __func__, real_status);
		errno = -errno;
	}
	return errno;
}
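/*
 * For reference, the bTransferStatus encoding this translation relies
 * on (as used by wa_xfer_result_chew() below): bit 7 flags an error,
 * bit 6 flags a warning, and the low six bits carry the WUSB1.0 Table
 * 8.15 status code that indexes the xlat[] table above.
 */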
/*
 * Process a xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up into parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here first */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
			xfer, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
		} else {
			wa->buf_in_urb->transfer_buffer =
				xfer->urb->transfer_buffer
				+ seg_idx * xfer->seg_size;
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	__wa_xfer_abort(xfer);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}
/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access the transfer until we are sure it hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by whoever unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode; the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result, then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect a ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.
 */
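/*
 * A compact restatement of those transitions (names as above; this is
 * documentation only -- the driver keeps no explicit state variable):
 *
 *	OFF --first xfer notification--------------------> RXR
 *	RXR --xfer_result for an IN segment--------------> RBI
 *	RBI --buf-in read done, DTI-URB reposted---------> RXR
 *	RXR --non-IN result, DTI-URB reposted------------> RXR
 *	RXR/RBI --ENOENT/ESHUTDOWN/too many errors-------> OFF
 */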
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have a xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_ABORTED
		    || usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}
/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed things up, we always have a URB reading the DTI endpoint;
 * we don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}