/*
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * How transfers work: get a buffer, break it up in segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 * (A sketch of the segment math follows this comment.)
 *
 * Sounds simple; it is a pain to implement.
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 * THIS CODE IS DISGUSTING
 *
 * Warned you are; it's my second try and still not happy with it.
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and then it's there, assigned, until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways it could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means unused) or when a transfer ends. Reset the
 *       timer when an xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()].
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *       when none are found go over the list, check their endpoint
 *       and their activity record; if there is no last-xfer-done-ts
 *       in the last x seconds, take it.
 *
 *   However, due to the fact that we have a set of limited
 *   resources (max-segments-at-the-same-time per xfer,
 *   xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *   we are going to have to rebuild all this based on a scheduler,
 *   where we have a list of transactions to do and, based on the
 *   availability of the different required components (blocks,
 *   rpipes, segment slots, etc), we go scheduling them. Painful.
 */
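
/*
 * Illustrative sketch only (NOT driver code, never compiled): how a
 * transfer buffer breaks up into seg_size'd chunks, mirroring what
 * __wa_xfer_setup_sizes()/__wa_xfer_setup() do below. All example_*
 * names are made up for this sketch.
 */
#if 0
#include <stddef.h>

struct example_seg {
	size_t offset;	/* where in the transfer buffer this segment starts */
	size_t len;	/* bytes carried by this segment (<= seg_size) */
};

/* Split buf_len bytes into seg_size'd chunks; returns segments used. */
static size_t example_split(size_t buf_len, size_t seg_size,
			    struct example_seg *segs, size_t max_segs)
{
	size_t cnt = 0;

	while (buf_len > 0 && cnt < max_segs) {
		segs[cnt].offset = cnt * seg_size;
		segs[cnt].len = buf_len > seg_size ? seg_size : buf_len;
		buf_len -= segs[cnt].len;
		cnt++;
	}
	return cnt;	/* only the last segment may be short */
}
#endif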
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};

enum {
	WA_SEGS_MAX = 255,
};

static void wa_xfer_delayed_run(struct wa_rpipe *);
/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb urb;
	struct urb *dto_urb;		/* for data output? */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
};
static inline void wa_seg_init(struct wa_seg *seg)
{
	usb_init_urb(&seg->urb);

	/* set the remaining memory to 0, preserving the embedded urb
	 * we just initialized. */
	memset(((void *)seg) + sizeof(seg->urb), 0,
		sizeof(*seg) - sizeof(seg->urb));
}
/*
 * Protected by xfer->lock
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}
/*
 * Destroy a transfer structure
 *
 * Note that freeing xfer->seg[cnt]->urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			usb_free_urb(xfer->seg[cnt]->dto_urb);
			usb_free_urb(&xfer->seg[cnt]->urb);
		}
	}
	kfree(xfer);
}
static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}
/*
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_put(xfer->wa);
	wa_xfer_put(xfer);
}
/*
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}
/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
					xfer, cnt, seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, seg->index, found_short, seg->result,
				urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
				xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
				xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
				 xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}
/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}
/*
 * Return the xfer's ID.
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}
/*
 * Search for a transfer with a given ID on the HCD's transfer list
 *
 * @returns the transfer, referenced, or NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	struct wa_xfer *xfer = NULL;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			xfer = xfer_itr;
			break;
		}
	}
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer;
}
struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;
}
/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		/* never happens */
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
	 * a requirement... */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize "
			"%zu\n", xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
	if (xfer->segs >= WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	return result;
}
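
/*
 * Worked example of the sizing math above (illustrative numbers, not
 * from any particular device): with wBlocks = 16 and
 * bRPipeBlockSize = 10, seg_size = 16 * (1 << 9) = 8192 bytes; with
 * wMaxPacketSize = 512 that is already a multiple, so it is kept
 * as-is. A 20000-byte URB then needs DIV_ROUND_UP(20000, 8192) = 3
 * segments.
 */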
/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
			sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
	default:
		BUG();
	}
}
/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the one who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * completes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the one who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/* allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset
 * we are about to transfer. */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
	const unsigned int bytes_transferred,
	const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
{
	struct scatterlist *out_sg;
	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
		nents;
	struct scatterlist *current_xfer_sg = in_sg;
	struct scatterlist *current_seg_sg, *last_seg_sg;

	/* skip previously transferred pages. */
	while ((current_xfer_sg) &&
			(bytes_processed < bytes_transferred)) {
		bytes_processed += current_xfer_sg->length;

		/* advance the sg if current segment starts on or past the
			next page. */
		if (bytes_processed <= bytes_transferred)
			current_xfer_sg = sg_next(current_xfer_sg);
	}

	/* the data for the current segment starts in current_xfer_sg.
		calculate the offset. */
	if (bytes_processed > bytes_transferred) {
		offset_into_current_page_data = current_xfer_sg->length -
			(bytes_processed - bytes_transferred);
	}

	/* calculate the number of pages needed by this segment. */
	nents = DIV_ROUND_UP((bytes_to_transfer +
		offset_into_current_page_data +
		current_xfer_sg->offset),
		PAGE_SIZE);

	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
	if (out_sg) {
		sg_init_table(out_sg, nents);

		/* copy the portion of the incoming SG that correlates to the
		 * data to be transferred by this segment to the segment SG. */
		last_seg_sg = current_seg_sg = out_sg;
		bytes_processed = 0;

		/* reset nents and calculate the actual number of sg entries
			needed. */
		nents = 0;
		while ((bytes_processed < bytes_to_transfer) &&
				current_seg_sg && current_xfer_sg) {
			unsigned int page_len = min((current_xfer_sg->length -
				offset_into_current_page_data),
				(bytes_to_transfer - bytes_processed));

			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
				page_len,
				current_xfer_sg->offset +
				offset_into_current_page_data);

			bytes_processed += page_len;

			last_seg_sg = current_seg_sg;
			current_seg_sg = sg_next(current_seg_sg);
			current_xfer_sg = sg_next(current_xfer_sg);

			/* only the first page may require additional offset. */
			offset_into_current_page_data = 0;
			nents++;
		}

		/* update num_sgs and terminate the list since we may have
		 * concatenated pages. */
		sg_mark_end(last_seg_sg);
		*out_num_sgs = nents;
	}

	return out_sg;
}
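
/*
 * Worked example for wa_xfer_create_subset_sg() (made-up numbers):
 * with 4096-byte input SG entries and bytes_transferred = 6000, the
 * skip loop leaves current_xfer_sg on the second entry with
 * offset_into_current_page_data = 4096 - (8192 - 6000) = 1904. A
 * bytes_to_transfer of 4096 then spans two output entries: 2192
 * bytes from the second input entry and 1904 from the third.
 */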
/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kmalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->urb, usb_dev,
				  usb_sndbulkpipe(usb_dev,
						  dto_epd->bEndpointAddress),
				  &seg->xfer_hdr, xfer_hdr_size,
				  wa_seg_cb, seg);
		buf_itr_size = min(buf_size, xfer->seg_size);
		if (xfer->is_inbound == 0 && buf_size > 0) {
			/* outbound data. */
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);
			if (xfer->is_dma) {
				seg->dto_urb->transfer_dma =
					xfer->urb->transfer_dma + buf_itr;
				seg->dto_urb->transfer_flags |=
					URB_NO_TRANSFER_DMA_MAP;
				seg->dto_urb->transfer_buffer = NULL;
				seg->dto_urb->sg = NULL;
				seg->dto_urb->num_sgs = 0;
			} else {
				/* do buffer or SG processing. */
				seg->dto_urb->transfer_flags &=
					~URB_NO_TRANSFER_DMA_MAP;
				/* this should always be 0 before a resubmit. */
				seg->dto_urb->num_mapped_sgs = 0;

				if (xfer->urb->transfer_buffer) {
					seg->dto_urb->transfer_buffer =
						xfer->urb->transfer_buffer +
						buf_itr;
					seg->dto_urb->sg = NULL;
					seg->dto_urb->num_sgs = 0;
				} else {
					/* allocate an SG list to store seg_size
					    bytes and copy the subset of the
					    xfer->urb->sg that matches the
					    buffer subset we are about to read.
					*/
					seg->dto_urb->sg =
						wa_xfer_create_subset_sg(
						xfer->urb->sg,
						buf_itr, buf_itr_size,
						&(seg->dto_urb->num_sgs));

					if (!(seg->dto_urb->sg)) {
						seg->dto_urb->num_sgs = 0;
						goto error_sg_alloc;
					}

					seg->dto_urb->transfer_buffer = NULL;
				}
			}
			seg->dto_urb->transfer_buffer_length = buf_itr_size;
		}
		seg->status = WA_SEG_READY;
		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;
	}
	return 0;

error_sg_alloc:
	usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
	kfree(xfer->seg[cnt]);
	cnt--;
error_seg_kmalloc:
	/* use the fact that cnt is left at where it failed */
	for (; cnt >= 0; cnt--) {
		if (xfer->seg[cnt] && xfer->is_inbound == 0) {
			/* free the subset sg before the urb that owns it */
			kfree(xfer->seg[cnt]->dto_urb->sg);
			usb_free_urb(xfer->seg[cnt]->dto_urb);
		}
		kfree(xfer->seg[cnt]);
	}
error_segs_kzalloc:
	return result;
}
/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 * need to do two for loops when we could run everything in a
 * single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;
	xfer_hdr0->dwTransferLength = cpu_to_le32(
		transfer_size > xfer->seg_size ?
			xfer->seg_size : transfer_size);
	transfer_size -= xfer->seg_size;
	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
		xfer_hdr->bTransferSegment = cnt;
		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size)
			: cpu_to_le32(transfer_size);
		xfer->seg[cnt]->status = WA_SEG_READY;
		transfer_size -= xfer->seg_size;
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}
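
/*
 * Example of the header fill above (illustrative numbers): a
 * 20000-byte URB with seg_size = 8192 produces three headers with
 * dwTransferLength 8192, 8192 and 3616; only the last one gets the
 * 0x80 bit set in bTransferSegment to mark the final segment.
 */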
/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;
	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
	if (result < 0) {
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	}
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
			       xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(&seg->urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}
/*
 * Execute more queued request segments until the maximum concurrent allowed
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	      && !list_empty(&rpipe->seg_list)) {
		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
				 list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}
/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0) {
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
			}
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return result;
}
/*
 * Second part of a URB/transfer enqueue
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted
 * xfer->urb	filled and refcounted (this is the case when called
 *		from wa_urb_enqueue() as we come from usb_submit_urb()
 *		and when called by wa_urb_enqueue_run(), as we took an
 *		extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0)
		goto error_rpipe_get;
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS)
		goto error_dequeued;

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0)
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
	if (result < 0)
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);
	return;

	/*
	 * this is basically wa_xfer_completion() broken up:
	 * wa_xfer_giveback() does a wa_xfer_put() that will call
	 * wa_xfer_destroy() and undo the setup.
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	wa_xfer_giveback(xfer);
	return;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
}
/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and then
 * submit them without the list lock held.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;
	LIST_HEAD(tmp_list);

	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
			wa->xfer_delayed_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
	 * can take xfer->lock as well as lock mutexes.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		list_del_init(&xfer->list_node);

		urb = xfer->urb;
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */
	}
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	pr_info("%s: Run delayed STALL processing.\n", __func__);

	/* Create a copy of the wa->xfer_errored_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * run rpipe_clear_feature_stalled from temp list without list lock
	 * held.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		struct usb_host_endpoint *ep;
		unsigned long flags;
		struct wa_rpipe *rpipe;

		spin_lock_irqsave(&xfer->lock, flags);
		ep = xfer->ep;
		rpipe = ep->hcpriv;
		spin_unlock_irqrestore(&xfer->lock, flags);

		/* clear RPIPE feature stalled without holding a lock. */
		rpipe_clear_feature_stalled(wa, ep);

		/* complete the xfer. This removes it from the tmp list. */
		wa_xfer_completion(xfer);

		/* check for work. */
		wa_xfer_delayed_run(rpipe);
	}
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps() [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call--else we call direct.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if ((urb->transfer_buffer == NULL)
	    && (urb->sg == NULL)
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_enqueue_work);
	} else {
		wa_urb_enqueue_b(xfer);
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt;
	unsigned rpipe_ready = 0;

	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/*
		 * Nothing setup yet: enqueue will see urb->status !=
		 * -EINPROGRESS (by hcd layer) and bail out with
		 * error, no need to do completion
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	if (rpipe == NULL) {
		pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
			__func__, wa_xfer_id(xfer),
			"Probably already aborted.\n" );
		goto out_unlock;
	}
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_SUBMITTED:
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DTI_PENDING:
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	return 0;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * codes
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Unknown WA transfer status 0x%02x\n",
			__func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Inconsistent WA status: 0x%02x\n",
			__func__, real_status);
		errno = -errno;
	}
	return errno;
}
/*
 * Process an xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up in parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
			xfer, xfer->id, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		/* this should always be 0 before a resubmit. */
		wa->buf_in_urb->num_mapped_sgs = 0;

		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ (seg_idx * xfer->seg_size);
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
			wa->buf_in_urb->transfer_buffer = NULL;
			wa->buf_in_urb->sg = NULL;
			wa->buf_in_urb->num_sgs = 0;
		} else {
			/* do buffer or SG processing. */
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;

			if (xfer->urb->transfer_buffer) {
				wa->buf_in_urb->transfer_buffer =
					xfer->urb->transfer_buffer
					+ (seg_idx * xfer->seg_size);
				wa->buf_in_urb->sg = NULL;
				wa->buf_in_urb->num_sgs = 0;
			} else {
				/* allocate an SG list to store seg_size bytes
					and copy the subset of the xfer->urb->sg
					that matches the buffer subset we are
					about to read. */
				wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
					xfer->urb->sg,
					seg_idx * xfer->seg_size,
					le32_to_cpu(
						xfer_result->dwTransferLength),
					&(wa->buf_in_urb->num_sgs));

				if (!(wa->buf_in_urb->sg)) {
					wa->buf_in_urb->num_sgs = 0;
					goto error_sg_alloc;
				}
				wa->buf_in_urb->transfer_buffer = NULL;
			}
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
	kfree(wa->buf_in_urb->sg);
error_sg_alloc:
	__wa_xfer_abort(xfer);
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	done = __wa_xfer_is_done(xfer);
	/*
	 * queue work item to clear STALL for control endpoints.
	 * Otherwise, let endpoint_reset take care of it.
	 */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
		usb_endpoint_xfer_control(&xfer->ep->desc) &&
		done) {

		dev_info(dev, "Control EP stall. Queue delayed work.\n");
		spin_lock_irq(&wa->xfer_list_lock);
		/* remove xfer from xfer_list. */
		list_del(&xfer->list_node);
		/* add xfer to xfer_errored_list. */
		list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock_irq(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}
/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the one who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)){
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling and buffer in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB. If it is a xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state. If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs. A sketch of these transitions follows.
 */
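/*
 * Rough sketch of the transitions described above (added for
 * clarity; same state names):
 *
 *          first xfer_result notif          IN seg: post BUF-IN-URB
 *   OFF -----------------------------> RXR -------------------------> RBI
 *    ^        (post DTI-URB)            ^                              |
 *    |                                  +------- repost DTI-URB <------+
 *    |                                  |   (OUT seg: RXR reposts and
 *    |                                  |    stays in RXR)
 *    +--- ENOENT/ESHUTDOWN/too many URB errors (from RXR or RBI)
 */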
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have a xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}
/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * ready to read it.
 *
 * To speed up things, we always have a URB reading the DTI URB; we
 * don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
	wa->buf_in_urb = NULL;
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}