/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * How transfers work: get a buffer, break it up into segments (the
 * segment size is a multiple of the maxpacket size). For each segment
 * issue a segment request (struct wa_xfer_*), then send the data
 * buffer if the transfer is outbound, or nothing if it is inbound
 * (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple; it is a pain to implement. A worked example of the
 * segmentation follows.
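 *
 * Illustrative numbers only (not taken from any particular device):
 * with a seg_size of 3584 bytes, a 12000 byte outbound URB is split
 * into DIV_ROUND_UP(12000, 3584) = 4 segments; the first three carry
 * 3584 bytes each and the last one the remaining 1248 bytes. Each
 * segment gets its own wa_xfer_* request header, and the data follows
 * it over the DTO endpoint.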
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 * THIS CODE IS DISGUSTING
 *
 * Warned you are; it's my second try and I'm still not happy with it.
 *
 * - Supports DMA xfers, control, bulk and maybe interrupt
 *
 * - Does not recycle unused rpipes
 *
 *   An rpipe is assigned to an endpoint the first time it is used,
 *   and then it stays assigned until the endpoint is disabled
 *   (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *   rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *   (should be a mutex).
 *
 *   Two ways it could be done:
 *
 *   (a) set up a timer every time an rpipe's use count drops to 1
 *       (which means unused) or when a transfer ends. Reset the
 *       timer when an xfer is queued. If the timer expires, release
 *       the rpipe [see rpipe_ep_disable()].
 *
 *   (b) when looking for free rpipes to attach [rpipe_get_by_ep()]
 *       and none are found, go over the list, check their endpoint
 *       and their activity record (no last-xfer-done timestamp in the
 *       last x seconds), and take one.
 *
 *   However, because we have a set of limited resources
 *   (max-segments-at-the-same-time per xfer, xfers-per-rpipe,
 *   blocks-per-rpipe, rpipes-per-host), in the end we are going to
 *   have to rebuild all this on top of a scheduler: keep a list of
 *   transactions to do and, based on the availability of the
 *   different required components (blocks, rpipes, segment slots,
 *   etc.), go scheduling them. Painful.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

/* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
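/*
 * Segment limits and life cycle states. The exact WA_SEGS_MAX value is
 * an assumption here (a 7-bit segment index allows at most 128
 * segments per transfer); the relative order of the states matters, as
 * the callbacks below compare against WA_SEG_PENDING.
 */
enum {
	WA_SEGS_MAX = 128,
};

enum wa_seg_status {
	WA_SEG_NOTREADY,
	WA_SEG_READY,
	WA_SEG_DELAYED,
	WA_SEG_SUBMITTED,
	WA_SEG_PENDING,
	WA_SEG_DTI_PENDING,
	WA_SEG_DONE,
	WA_SEG_ERROR,
	WA_SEG_ABORTED,
};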
static void wa_xfer_delayed_run(struct wa_rpipe *);
/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
	struct urb urb;
	struct urb *dto_urb;		/* for data output? */
	struct list_head list_node;	/* for rpipe->req_list */
	struct wa_xfer *xfer;		/* out xfer */
	u8 index;			/* which segment we are */
	enum wa_seg_status status;
	ssize_t result;			/* bytes xfered or error */
	struct wa_xfer_hdr xfer_hdr;
	u8 xfer_extra[];		/* xtra space for xfer_hdr_ctl */
};
static inline void wa_seg_init(struct wa_seg *seg)
{
	/* the embedded URB's refcount doubles as the segment's (see the
	 * struct comment above). */
	usb_init_urb(&seg->urb);

	/* set the remaining memory to 0. */
	memset(((void *)seg) + sizeof(seg->urb), 0,
		sizeof(*seg) - sizeof(seg->urb));
}
/*
 * Protected by xfer->lock
 */
struct wa_xfer {
	struct kref refcnt;
	struct list_head list_node;
	spinlock_t lock;
	u32 id;

	struct wahc *wa;		/* Wire adapter we are plugged to */
	struct usb_host_endpoint *ep;
	struct urb *urb;		/* URB we are transferring for */
	struct wa_seg **seg;		/* transfer segments */
	u8 segs, segs_submitted, segs_done;
	unsigned is_inbound:1;
	unsigned is_dma:1;
	size_t seg_size;
	int result;

	gfp_t gfp;			/* allocation mask */

	struct wusb_dev *wusb_dev;	/* for activity timestamps */
};
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
	kref_init(&xfer->refcnt);
	INIT_LIST_HEAD(&xfer->list_node);
	spin_lock_init(&xfer->lock);
}
/*
 * Destroy a transfer structure
 *
 * Note that freeing xfer->seg[cnt]->urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
	struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
	if (xfer->seg) {
		unsigned cnt;
		for (cnt = 0; cnt < xfer->segs; cnt++) {
			usb_free_urb(xfer->seg[cnt]->dto_urb);
			usb_free_urb(&xfer->seg[cnt]->urb);
		}
	}
	kfree(xfer);
}
static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}
/*
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
	unsigned long flags;

	spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
	/* FIXME: segmentation broken -- kills DWA */
	wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
	wa_xfer_put(xfer);
}
/*
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
	wusb_dev_put(xfer->wusb_dev);
	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
}
/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
	struct device *dev = &xfer->wa->usb_iface->dev;
	unsigned result, cnt;
	struct wa_seg *seg;
	struct urb *urb = xfer->urb;
	unsigned found_short = 0;

	result = xfer->segs_done == xfer->segs_submitted;
	if (result == 0)
		goto out;
	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_DONE:
			if (found_short && seg->result > 0) {
				dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",
					xfer, cnt, seg->result);
				urb->status = -EINVAL;
				goto out;
			}
			urb->actual_length += seg->result;
			if (seg->result < xfer->seg_size
			    && cnt != xfer->segs-1)
				found_short = 1;
			dev_dbg(dev, "xfer %p#%u: DONE short %d "
				"result %zu urb->actual_length %d\n",
				xfer, seg->index, found_short, seg->result,
				urb->actual_length);
			break;
		case WA_SEG_ERROR:
			xfer->result = seg->result;
			dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",
				xfer, seg->index, seg->result);
			goto out;
		case WA_SEG_ABORTED:
			dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
				xfer, seg->index, urb->status);
			xfer->result = urb->status;
			goto out;
		default:
			dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
				 xfer, cnt, seg->status);
			xfer->result = -EINVAL;
			goto out;
		}
	}
	xfer->result = 0;
out:
	return result;
}
/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA... which makes one wonder why they put
 * a 32 bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
	xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}
/*
 * Return the xfer's ID.
 */
static u32 wa_xfer_id(struct wa_xfer *xfer)
{
	return xfer->id;
}
/*
 * Search for a transfer list ID on the HCD's URB list
 *
 * For 32 bit architectures, we use the pointer itself; for 64 bits, a
 * 32-bit hash of the pointer.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
	unsigned long flags;
	struct wa_xfer *xfer_itr;
	struct wa_xfer *xfer = NULL;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
			xfer = xfer_itr;
			break;
		}
	}
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	return xfer;
}
struct wa_xfer_abort_buffer {
	struct urb urb;
	struct wa_xfer_abort cmd;
};

static void __wa_xfer_abort_cb(struct urb *urb)
{
	struct wa_xfer_abort_buffer *b = urb->context;
	usb_put_urb(&b->urb);
}
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfreed.
 *
 * We'll get an 'aborted transaction' xfer result on DTI, which we'll
 * politely ignore because at this point the transaction has been
 * marked as aborted already.
 */
static void __wa_xfer_abort(struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	struct wa_xfer_abort_buffer *b;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	b = kmalloc(sizeof(*b), GFP_ATOMIC);
	if (b == NULL)
		goto error_kmalloc;
	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.bRequestType = WA_XFER_ABORT;
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_init_urb(&b->urb);
	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	result = usb_submit_urb(&b->urb, GFP_ATOMIC);
	if (result < 0)
		goto error_submit;
	return;				/* callback frees! */

error_submit:
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
			xfer, result);
	kfree(b);
error_kmalloc:
	return;
}
/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
				     enum wa_xfer_type *pxfer_type)
{
	ssize_t result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	size_t maxpktsize;
	struct urb *urb = xfer->urb;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	switch (rpipe->descr.bmAttribute & 0x3) {
	case USB_ENDPOINT_XFER_CONTROL:
		*pxfer_type = WA_XFER_TYPE_CTL;
		result = sizeof(struct wa_xfer_ctl);
		break;
	case USB_ENDPOINT_XFER_INT:
	case USB_ENDPOINT_XFER_BULK:
		*pxfer_type = WA_XFER_TYPE_BI;
		result = sizeof(struct wa_xfer_bi);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		dev_err(dev, "FIXME: ISOC not implemented\n");
		result = -ENOSYS;
		goto error;
	default:
		BUG();
		result = -EINVAL;	/* shut gcc up */
	}
	xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
	/* Compute the segment size and make sure it is a multiple of
	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
	 * a requirement... */
	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
	if (xfer->seg_size < maxpktsize) {
		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
			xfer->seg_size, maxpktsize);
		result = -EINVAL;
		goto error;
	}
	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
	xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length, xfer->seg_size);
	if (xfer->segs > WA_SEGS_MAX) {
		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
			WA_SEGS_MAX);
		result = -EINVAL;
		goto error;
	}
	if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
		xfer->segs = 1;
error:
	return result;
}
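/*
 * Numeric example of the sizing above (illustrative values, not from
 * any specific adapter): with wBlocks = 7 and bRPipeBlockSize = 10,
 * the raw seg_size is 7 * 2^9 = 3584 bytes; with a wMaxPacketSize of
 * 512 it is already a multiple, so it stays at 3584. A 4096 byte URB
 * then needs DIV_ROUND_UP(4096, 3584) = 2 segments.
 */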
/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,
				 enum wa_xfer_type xfer_type,
				 size_t xfer_hdr_size)
{
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
	xfer_hdr0->bRequestType = xfer_type;
	xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
	xfer_hdr0->dwTransferID = wa_xfer_id(xfer);
	xfer_hdr0->bTransferSegment = 0;
	switch (xfer_type) {
	case WA_XFER_TYPE_CTL: {
		struct wa_xfer_ctl *xfer_ctl =
			container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
		xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
		memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
		       sizeof(xfer_ctl->baSetupData));
		break;
	}
	case WA_XFER_TYPE_BI:
		break;
	case WA_XFER_TYPE_ISO:
		printk(KERN_ERR "FIXME: ISOC not implemented\n");
	default:
		BUG();
	}
}
/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready = 0;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);
		if (seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the one who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		if (seg->status != WA_SEG_ERROR) {
			seg->status = WA_SEG_ERROR;
			seg->result = urb->status;
			xfer->segs_done++;
			__wa_xfer_abort(xfer);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_is_done(xfer);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/*
 * Callback for the segment request
 *
 * If successful, transition state (unless already transitioned or it
 * is an outbound transfer); otherwise, take note of the error, mark
 * this segment done and try completion.
 *
 * Note we don't access anything until we are sure that the transfer
 * hasn't been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer is already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned long flags;
	unsigned rpipe_ready;
	u8 done = 0;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);
		if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
			seg->status = WA_SEG_PENDING;
		spin_unlock_irqrestore(&xfer->lock, flags);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the one who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		usb_unlink_urb(seg->dto_urb);
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/*
 * Allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset we are about to
 * transfer.
 */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
	const unsigned int bytes_transferred,
	const unsigned int bytes_to_transfer, unsigned int *out_num_sgs)
{
	struct scatterlist *out_sg;
	unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
		nents;
	struct scatterlist *current_xfer_sg = in_sg;
	struct scatterlist *current_seg_sg, *last_seg_sg;

	/* skip previously transferred pages. */
	while ((current_xfer_sg) &&
			(bytes_processed < bytes_transferred)) {
		bytes_processed += current_xfer_sg->length;

		/* advance the sg if the current segment starts on or past
		 * the next page. */
		if (bytes_processed <= bytes_transferred)
			current_xfer_sg = sg_next(current_xfer_sg);
	}

	/* the data for the current segment starts in current_xfer_sg.
	 * calculate the offset. */
	if (bytes_processed > bytes_transferred) {
		offset_into_current_page_data = current_xfer_sg->length -
			(bytes_processed - bytes_transferred);
	}

	/* calculate the number of pages needed by this segment. */
	nents = DIV_ROUND_UP((bytes_to_transfer +
		offset_into_current_page_data +
		current_xfer_sg->offset),
		PAGE_SIZE);

	out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
	if (out_sg) {
		sg_init_table(out_sg, nents);

		/* copy the portion of the incoming SG that correlates to the
		 * data to be transferred by this segment to the segment SG. */
		last_seg_sg = current_seg_sg = out_sg;
		bytes_processed = 0;

		/* reset nents and calculate the actual number of sg entries
		 * needed. */
		nents = 0;
		while ((bytes_processed < bytes_to_transfer) &&
				current_seg_sg && current_xfer_sg) {
			unsigned int page_len = min((current_xfer_sg->length -
				offset_into_current_page_data),
				(bytes_to_transfer - bytes_processed));

			sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
				page_len,
				current_xfer_sg->offset +
				offset_into_current_page_data);

			bytes_processed += page_len;

			last_seg_sg = current_seg_sg;
			current_seg_sg = sg_next(current_seg_sg);
			current_xfer_sg = sg_next(current_xfer_sg);

			/* only the first page may require additional offset. */
			offset_into_current_page_data = 0;
			nents++;
		}

		/* update num_sgs and terminate the list since we may have
		 * concatenated pages. */
		sg_mark_end(last_seg_sg);
		*out_num_sgs = nents;
	}

	return out_sg;
}
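/*
 * Example of the subset computation above (illustrative numbers): if
 * in_sg maps three 4096 byte pages and 6000 bytes have already been
 * transferred, the subset for a 3584 byte segment starts 1904 bytes
 * into the second page, so the first subset entry gets length 2192
 * and the second entry the remaining 1392 bytes from the third page.
 */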
/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
	int result, cnt;
	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;
	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
	struct wa_seg *seg;
	size_t buf_itr, buf_size, buf_itr_size;

	result = -ENOMEM;
	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
	if (xfer->seg == NULL)
		goto error_segs_kzalloc;
	buf_itr = 0;
	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
		if (seg == NULL)
			goto error_seg_kmalloc;
		wa_seg_init(seg);
		seg->xfer = xfer;
		seg->index = cnt;
		usb_fill_bulk_urb(&seg->urb, usb_dev,
				  usb_sndbulkpipe(usb_dev,
						  dto_epd->bEndpointAddress),
				  &seg->xfer_hdr, xfer_hdr_size,
				  wa_seg_cb, seg);
		buf_itr_size = min(buf_size, xfer->seg_size);
		if (xfer->is_inbound == 0 && buf_size > 0) {
			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (seg->dto_urb == NULL)
				goto error_dto_alloc;
			usb_fill_bulk_urb(
				seg->dto_urb, usb_dev,
				usb_sndbulkpipe(usb_dev,
						dto_epd->bEndpointAddress),
				NULL, 0, wa_seg_dto_cb, seg);
			if (xfer->is_dma) {
				seg->dto_urb->transfer_dma =
					xfer->urb->transfer_dma + buf_itr;
				seg->dto_urb->transfer_flags |=
					URB_NO_TRANSFER_DMA_MAP;
				seg->dto_urb->transfer_buffer = NULL;
				seg->dto_urb->sg = NULL;
				seg->dto_urb->num_sgs = 0;
			} else {
				/* do buffer or SG processing. */
				seg->dto_urb->transfer_flags &=
					~URB_NO_TRANSFER_DMA_MAP;
				/* this should always be 0 before a resubmit. */
				seg->dto_urb->num_mapped_sgs = 0;

				if (xfer->urb->transfer_buffer) {
					seg->dto_urb->transfer_buffer =
						xfer->urb->transfer_buffer +
						buf_itr;
					seg->dto_urb->sg = NULL;
					seg->dto_urb->num_sgs = 0;
				} else {
					/* allocate an SG list to store
					 * seg_size bytes and copy the subset
					 * of the xfer->urb->sg that matches
					 * the buffer subset we are about to
					 * read. */
					seg->dto_urb->sg =
						wa_xfer_create_subset_sg(
						xfer->urb->sg,
						buf_itr, buf_itr_size,
						&(seg->dto_urb->num_sgs));
					if (!(seg->dto_urb->sg)) {
						seg->dto_urb->num_sgs = 0;
						goto error_sg_alloc;
					}
					seg->dto_urb->transfer_buffer = NULL;
				}
			}
			seg->dto_urb->transfer_buffer_length = buf_itr_size;
		}
		seg->status = WA_SEG_READY;
		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;
	}
	return 0;

error_sg_alloc:
	usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
	kfree(xfer->seg[cnt]);
	cnt--;
error_seg_kmalloc:
	/* use the fact that cnt is left at where it failed */
	for (; cnt >= 0; cnt--) {
		if (xfer->seg[cnt] && xfer->is_inbound == 0) {
			/* free the SG list before the URB that carries it */
			kfree(xfer->seg[cnt]->dto_urb->sg);
			usb_free_urb(xfer->seg[cnt]->dto_urb);
		}
		kfree(xfer->seg[cnt]);
	}
error_segs_kzalloc:
	return result;
}
/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer into a list of segments, each one with
 * a structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
	int result;
	struct device *dev = &xfer->wa->usb_iface->dev;
	enum wa_xfer_type xfer_type = 0; /* shut up GCC */
	size_t xfer_hdr_size, cnt, transfer_size;
	struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);
	if (result < 0)
		goto error_setup_sizes;
	xfer_hdr_size = result;
	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
	if (result < 0) {
		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;
	}
	/* Fill the first header */
	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	/* Fill remaining headers */
	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;
	xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
		xfer->seg_size : transfer_size;
	transfer_size -= xfer->seg_size;
	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
		xfer_hdr->bTransferSegment = cnt;
		xfer_hdr->dwTransferLength = transfer_size > xfer->seg_size ?
			cpu_to_le32(xfer->seg_size)
			: cpu_to_le32(transfer_size);
		xfer->seg[cnt]->status = WA_SEG_READY;
		transfer_size -= xfer->seg_size;
	}
	xfer_hdr->bTransferSegment |= 0x80;	/* this is the last segment */
	result = 0;
error_setup_segs:
error_setup_sizes:
	return result;
}
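/*
 * With the illustrative 12000 byte / 3584 byte segment example from
 * the top of the file, the loop above produces dwTransferLength
 * values of 3584, 3584, 3584 and 1248, and only the fourth header
 * gets bit 0x80 set in bTransferSegment to mark the last segment.
 */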
/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)
{
	int result;

	result = usb_submit_urb(&seg->urb, GFP_ATOMIC);
	if (result < 0) {
		printk(KERN_ERR "xfer %p#%u: REQ submit failed: %d\n",
		       xfer, seg->index, result);
		goto error_seg_submit;
	}
	if (seg->dto_urb) {
		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
		if (result < 0) {
			printk(KERN_ERR "xfer %p#%u: DTO submit failed: %d\n",
			       xfer, seg->index, result);
			goto error_dto_submit;
		}
	}
	seg->status = WA_SEG_SUBMITTED;
	rpipe_avail_dec(rpipe);
	return 0;

error_dto_submit:
	usb_unlink_urb(&seg->urb);
error_seg_submit:
	seg->status = WA_SEG_ERROR;
	seg->result = result;
	return result;
}
/*
 * Execute more queued request segments until the maximum concurrent
 * allowed is reached
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
	int result;
	struct device *dev = &rpipe->wa->usb_iface->dev;
	struct wa_seg *seg;
	struct wa_xfer *xfer;
	unsigned long flags;

	spin_lock_irqsave(&rpipe->seg_lock, flags);
	while (atomic_read(&rpipe->segs_available) > 0
	      && !list_empty(&rpipe->seg_list)) {
		seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
				       list_node);
		list_del(&seg->list_node);
		xfer = seg->xfer;
		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",
			xfer, seg->index, atomic_read(&rpipe->segs_available), result);
		if (unlikely(result < 0)) {
			spin_unlock_irqrestore(&rpipe->seg_lock, flags);
			spin_lock_irqsave(&xfer->lock, flags);
			__wa_xfer_abort(xfer);
			xfer->segs_done++;
			spin_unlock_irqrestore(&xfer->lock, flags);
			spin_lock_irqsave(&rpipe->seg_lock, flags);
		}
	}
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
}
/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
	int result;
	struct wahc *wa = xfer->wa;
	struct device *dev = &wa->usb_iface->dev;
	unsigned cnt;
	struct wa_seg *seg;
	unsigned long flags;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;
	size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
	u8 available;
	u8 empty;

	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	list_add_tail(&xfer->list_node, &wa->xfer_list);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

	BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
	result = 0;
	spin_lock_irqsave(&rpipe->seg_lock, flags);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		available = atomic_read(&rpipe->segs_available);
		empty = list_empty(&rpipe->seg_list);
		seg = xfer->seg[cnt];
		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);
			seg->status = WA_SEG_DELAYED;
			list_add_tail(&seg->list_node, &rpipe->seg_list);
		} else {
			result = __wa_seg_submit(rpipe, xfer, seg);
			if (result < 0) {
				__wa_xfer_abort(xfer);
				goto error_seg_submit;
			}
		}
		xfer->segs_submitted++;
	}
error_seg_submit:
	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
	return result;
}
/*
 * Second part of a URB/transfer enqueue
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa	filled and refcounted
 * xfer->ep	filled with rpipe refcounted if
 *              delayed == 0
 * xfer->urb	filled and refcounted (this is the case when called
 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 *              and when called by wa_urb_enqueue_run(), as we took an
 *              extra ref dropped by _run() after we return).
 * xfer->gfp	filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If the
 * xfer result never kicks in, the xfer will timeout from the USB code
 * and dequeue() will be called.
 */
static void wa_urb_enqueue_b(struct wa_xfer *xfer)
{
	int result;
	unsigned long flags;
	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;
	struct wusbhc *wusbhc = wa->wusb;
	struct wusb_dev *wusb_dev;
	unsigned done;

	result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
	if (result < 0)
		goto error_rpipe_get;
	result = -ENODEV;
	/* FIXME: segmentation broken -- kills DWA */
	mutex_lock(&wusbhc->mutex);		/* get a WUSB dev */
	if (urb->dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
	if (wusb_dev == NULL) {
		mutex_unlock(&wusbhc->mutex);
		goto error_dev_gone;
	}
	mutex_unlock(&wusbhc->mutex);

	spin_lock_irqsave(&xfer->lock, flags);
	xfer->wusb_dev = wusb_dev;
	result = urb->status;
	if (urb->status != -EINPROGRESS)
		goto error_dequeued;

	result = __wa_xfer_setup(xfer, urb);
	if (result < 0)
		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);
	if (result < 0)
		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);
	return;

	/*
	 * this is basically wa_xfer_completion() broken up.
	 * wa_xfer_giveback() does a wa_xfer_put() that will call
	 * wa_xfer_destroy() and clean up the rest.
	 */
error_xfer_setup:
error_dequeued:
	spin_unlock_irqrestore(&xfer->lock, flags);
	/* FIXME: segmentation broken, kills DWA */
	wusb_dev_put(wusb_dev);
error_dev_gone:
	rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
	xfer->result = result;
	wa_xfer_giveback(xfer);
	return;

error_xfer_submit:
	done = __wa_xfer_is_done(xfer);
	xfer->result = result;
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
}
/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle. That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and
 * then submit them without the list lock held.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
	struct wa_xfer *xfer, *next;
	struct urb *urb;
	LIST_HEAD(tmp_list);

	/* Create a copy of the wa->xfer_delayed_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_delayed_list,
			wa->xfer_delayed_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * enqueue from temp list without list lock held since wa_urb_enqueue_b
	 * can take xfer->lock as well as lock mutexes.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		list_del_init(&xfer->list_node);

		urb = xfer->urb;
		wa_urb_enqueue_b(xfer);
		usb_put_urb(urb);	/* taken when queuing */
	}
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
	struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
	struct wa_xfer *xfer, *next;
	LIST_HEAD(tmp_list);

	pr_info("%s: Run delayed STALL processing.\n", __func__);

	/* Create a copy of the wa->xfer_errored_list while holding the lock */
	spin_lock_irq(&wa->xfer_list_lock);
	list_cut_position(&tmp_list, &wa->xfer_errored_list,
			wa->xfer_errored_list.prev);
	spin_unlock_irq(&wa->xfer_list_lock);

	/*
	 * run rpipe_clear_feature_stalled from temp list without list lock
	 * held.
	 */
	list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
		struct usb_host_endpoint *ep;
		unsigned long flags;
		struct wa_rpipe *rpipe;

		spin_lock_irqsave(&xfer->lock, flags);
		ep = xfer->ep;
		rpipe = ep->hcpriv;
		spin_unlock_irqrestore(&xfer->lock, flags);

		/* clear RPIPE feature stalled without holding a lock. */
		rpipe_clear_feature_stalled(wa, ep);

		/* complete the xfer. This removes it from the tmp list. */
		wa_xfer_completion(xfer);

		/* check for work. */
		wa_xfer_delayed_run(rpipe);
	}
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps() [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call; else we call
 * it directly.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
		   struct urb *urb, gfp_t gfp)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer *xfer;
	unsigned long my_flags;
	unsigned cant_sleep = irqs_disabled() | in_atomic();

	if ((urb->transfer_buffer == NULL)
	    && (urb->sg == NULL)
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
		dump_stack();
	}

	result = -ENOMEM;
	xfer = kzalloc(sizeof(*xfer), gfp);
	if (xfer == NULL)
		goto error_kmalloc;

	result = -ENOENT;
	if (urb->status != -EINPROGRESS)	/* cancelled */
		goto error_dequeued;		/* before starting? */
	wa_xfer_init(xfer);
	xfer->wa = wa_get(wa);
	xfer->urb = urb;
	xfer->gfp = gfp;
	xfer->ep = ep;
	urb->hcpriv = xfer;

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

	if (cant_sleep) {
		usb_get_urb(urb);
		spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
		list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
		spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
		queue_work(wusbd, &wa->xfer_enqueue_work);
	} else {
		wa_urb_enqueue_b(xfer);
	}
	return 0;

error_dequeued:
	kfree(xfer);
error_kmalloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
/*
 * Dequeue a URB and make sure usb_hcd_giveback_urb() [completion
 * handler] is called.
 *
 * Until a transfer goes successfully through wa_urb_enqueue() it
 * needs to be dequeued with completion calling; when stuck in delayed
 * or before wa_xfer_setup() is called, we need to do completion.
 *
 *  not setup  If there is no hcpriv yet, that means that the enqueue
 *             still had no time to set the xfer up. Because
 *             urb->status should be other than -EINPROGRESS,
 *             enqueue() will catch that and bail out.
 *
 * If the transfer has gone through setup, we just need to clean it
 * up. If it has gone through submit(), we have to abort it [with an
 * asynch request] and then make sure we cancel each segment.
 */
int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
{
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt;
	unsigned rpipe_ready = 0;

	xfer = urb->hcpriv;
	if (xfer == NULL) {
		/*
		 * Nothing is set up yet: enqueue will see urb->status !=
		 * -EINPROGRESS (set by the hcd layer) and bail out with
		 * an error; no need to do completion.
		 */
		BUG_ON(urb->status == -EINPROGRESS);
		goto out;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	rpipe = xfer->ep->hcpriv;
	if (rpipe == NULL) {
		pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
			__func__, wa_xfer_id(xfer),
			"Probably already aborted.\n");
		goto out_unlock;
	}
	/* Check the delayed list -> if there, release and complete */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted.*/
	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
			       xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			seg->status = WA_SEG_ABORTED;
			spin_lock_irqsave(&rpipe->seg_lock, flags2);
			list_del(&seg->list_node);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);
			break;
		case WA_SEG_SUBMITTED:
			seg->status = WA_SEG_ABORTED;
			usb_unlink_urb(&seg->urb);
			if (xfer->is_inbound == 0)
				usb_unlink_urb(seg->dto_urb);
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_PENDING:
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DTI_PENDING:
			usb_unlink_urb(wa->dti_urb);
			seg->status = WA_SEG_ABORTED;
			xfer->segs_done++;
			rpipe_ready = rpipe_avail_inc(rpipe);
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
		}
	}
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return 0;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
out:
	return 0;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	usb_put_urb(urb);		/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder. Negative are to be passed up to the user in the
 * normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-EINTR,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;

	if (status >= ARRAY_SIZE(xlat)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Unknown WA transfer status 0x%02x\n",
			__func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Inconsistent WA status: 0x%02x\n",
			__func__, real_status);
		errno = -errno;
	}
	return errno;
}
/*
 * Process an xfer result completion message
 *
 * inbound transfers: need to schedule a DTI read
 *
 * FIXME: this function needs to be broken up in parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	u8 seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	struct wa_xfer_result *xfer_result = wa->xfer_result;
	u8 done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg status %u)\n",
		xfer, seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* oops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#:%08X:%u failed (0x%02x)\n",
			xfer, xfer->id, seg->index, usb_status);
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	if (xfer->is_inbound) {	/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
		/* this should always be 0 before a resubmit. */
		wa->buf_in_urb->num_mapped_sgs = 0;

		if (xfer->is_dma) {
			wa->buf_in_urb->transfer_dma =
				xfer->urb->transfer_dma
				+ (seg_idx * xfer->seg_size);
			wa->buf_in_urb->transfer_flags
				|= URB_NO_TRANSFER_DMA_MAP;
			wa->buf_in_urb->transfer_buffer = NULL;
			wa->buf_in_urb->sg = NULL;
			wa->buf_in_urb->num_sgs = 0;
		} else {
			/* do buffer or SG processing. */
			wa->buf_in_urb->transfer_flags
				&= ~URB_NO_TRANSFER_DMA_MAP;

			if (xfer->urb->transfer_buffer) {
				wa->buf_in_urb->transfer_buffer =
					xfer->urb->transfer_buffer
					+ (seg_idx * xfer->seg_size);
				wa->buf_in_urb->sg = NULL;
				wa->buf_in_urb->num_sgs = 0;
			} else {
				/* allocate an SG list to store seg_size bytes
				 * and copy the subset of the xfer->urb->sg
				 * that matches the buffer subset we are
				 * about to read. */
				wa->buf_in_urb->sg = wa_xfer_create_subset_sg(
					xfer->urb->sg,
					seg_idx * xfer->seg_size,
					le32_to_cpu(
						xfer_result->dwTransferLength),
					&(wa->buf_in_urb->num_sgs));
				if (!(wa->buf_in_urb->sg)) {
					wa->buf_in_urb->num_sgs = 0;
					goto error_sg_alloc;
				}
				wa->buf_in_urb->transfer_buffer = NULL;
			}
		}
		wa->buf_in_urb->transfer_buffer_length =
			le32_to_cpu(xfer_result->dwTransferLength);
		wa->buf_in_urb->context = seg;
		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
		if (result < 0)
			goto error_submit_buf_in;
	} else {
		/* OUT data phase, complete it -- */
		seg->status = WA_SEG_DONE;
		seg->result = le32_to_cpu(xfer_result->dwTransferLength);
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
	kfree(wa->buf_in_urb->sg);
error_sg_alloc:
	__wa_xfer_abort(xfer);
error_complete:
	seg->status = WA_SEG_ERROR;
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	done = __wa_xfer_is_done(xfer);
	/*
	 * queue work item to clear STALL for control endpoints.
	 * Otherwise, let endpoint_reset take care of it.
	 */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
		usb_endpoint_xfer_control(&xfer->ep->desc) &&
		done) {

		dev_info(dev, "Control EP stall. Queue delayed work.\n");
		spin_lock_irq(&wa->xfer_list_lock);
		/* remove xfer from xfer_list. */
		list_del(&xfer->list_node);
		/* add xfer to xfer_errored_list. */
		list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock_irq(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}
/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access anything until we are sure that the transfer
 * hasn't been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer is already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready;
	unsigned long flags;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);
		seg->status = WA_SEG_DONE;
		seg->result = urb->actual_length;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the one who unlinked us */
		break;
	default:		/* Other errors ... */
		spin_lock_irqsave(&xfer->lock, flags);
		wa = xfer->wa;
		dev = &wa->usb_iface->dev;
		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->status = WA_SEG_ERROR;
		seg->result = urb->status;
		xfer->segs_done++;
		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
}
/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling a buffer-in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not an xfer_result, we complain and
 * repost the DTI-URB. If it is an xfer_result then do the xfer seg
 * request accounting. If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer. The BUF-IN-URB callback will
 * repost the DTI-URB and move back to the RXR state. If there was no
 * IN segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too
 * many errors) in the URBs. A sketch of these transitions follows.
 */
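/*
 * A rough sketch of the transitions described above:
 *
 *    OFF --(first xfer_result notification)--------------> RXR
 *    RXR --(xfer_result for an IN segment)----------------> RBI
 *    RBI --(BUF-IN-URB completes, DTI-URB reposted)-------> RXR
 *    RXR --(ENOENT/ESHUTDOWN or too many errors)----------> OFF
 */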
static void wa_xfer_result_cb(struct urb *urb)
{
	int result;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_result *xfer_result;
	u32 xfer_id;
	struct wa_xfer *xfer;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		/* We have an xfer result buffer; check it */
		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size "
				"xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));
			break;
		}
		xfer_result = wa->xfer_result;
		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header length %u\n",
				xfer_result->hdr.bLength);
			break;
		}
		if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
			dev_err(dev, "DTI Error: xfer result--"
				"bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);
			break;
		}
		usb_status = xfer_result->bTransferStatus & 0x3f;
		if (usb_status == WA_XFER_STATUS_NOT_FOUND)
			/* taken care of already */
			break;
		xfer_id = xfer_result->dwTransferID;
		xfer = wa_xfer_get_by_id(wa, xfer_id);
		if (xfer == NULL) {
			/* FIXME: transaction might have been cancelled */
			dev_err(dev, "DTI Error: xfer result--"
				"unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);
			break;
		}
		wa_xfer_result_chew(wa, xfer);
		wa_xfer_put(xfer);
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}
	/* Resubmit the DTI URB */
	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		wa_reset_all(wa);
	}
out:
	return;
}
/*
 * Transfer complete notification
 *
 * Called from the notif.c code. We get a notification on EP2 saying
 * that some endpoint has some transfer result data available. We are
 * about to read it.
 *
 * To speed up things, we always have a URB reading on the DTI
 * endpoint; we don't really set it up and start it until the first
 * xfer complete notification arrives, which is what we do here.
 *
 * Follow up in wa_xfer_result_cb(), as that's where the whole state
 * machine starts.
 *
 * So here we just initialize the DTI URB for reading transfer result
 * notifications and also the buffer-in URB, for reading buffers. Then
 * we just submit the DTI URB.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}
	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		wa->xfer_result, wa->xfer_result_size,
		wa_xfer_result_cb, wa);

	wa->buf_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->buf_in_urb == NULL) {
		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->buf_in_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | notif_xfer->bEndpoint),
		NULL, 0, wa_buf_in_cb, wa);
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
			"resetting\n", result);
		goto error_dti_urb_submit;
	}
out:
	return;

error_dti_urb_submit:
	usb_put_urb(wa->buf_in_urb);
error_buf_in_urb_alloc:
	usb_put_urb(wa->dti_urb);
error_dti_urb_alloc:
error:
	wa_reset_all(wa);
}