/*
 * WUSB Wire Adapter
 * Data transfer and URB enqueuing
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * How transfers work: get a buffer, break it up in segments (segment
 * size is a multiple of the maxpacket size). For each segment issue a
 * segment request (struct wa_xfer_*), then send the data buffer if
 * out or nothing if in (all over the DTO endpoint).
 *
 * For each submitted segment request, a notification will come over
 * the NEP endpoint and a transfer result (struct xfer_result) will
 * arrive in the DTI URB. Read it, get the xfer ID, see if there is
 * data coming (inbound transfer), schedule a read and handle it.
 *
 * Sounds simple, it is a pain to implement.
 *
 * LIFE CYCLE / STATE DIAGRAM
 *
 *   FIXME
 *
 * THIS CODE IS DISGUSTING
 *
 *   Warned you are; it's my second try and still not happy with it.
 *
 * NOTES:
 *
 *   - Supports DMA xfers, control, bulk and maybe interrupt
 *
 *   - Does not recycle unused rpipes
 *
 *     An rpipe is assigned to an endpoint the first time it is used,
 *     and then it's there, assigned, until the endpoint is disabled
 *     (destroyed [{h,d}wahc_op_ep_disable()]). The assignment of the
 *     rpipe to the endpoint is done under the wa->rpipe_sem semaphore
 *     (should be a mutex).
 *
 *     Two methods it could be done:
 *
 *     (a) set up a timer every time an rpipe's use count drops to 1
 *         (which means unused) or when a transfer ends. Reset the
 *         timer when a xfer is queued. If the timer expires, release
 *         the rpipe [see rpipe_ep_disable()].
 *
 *     (b) when looking for free rpipes to attach [rpipe_get_by_ep()],
 *         when none are found go over the list, check their endpoint
 *         and their activity record (if no last-xfer-done-ts in the
 *         last x seconds) take it
 *
 *     However, due to the fact that we have a set of limited
 *     resources (max-segments-at-the-same-time per xfer,
 *     xfers-per-rpipe, blocks-per-rpipe, rpipes-per-host), at the end
 *     we are going to have to rebuild all this based on a scheduler,
 *     where we have a list of transactions to do and, based on the
 *     availability of the different required components (blocks,
 *     rpipes, segment slots, etc), we go scheduling them. Painful.
 */
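/*
 * A worked example of the segmentation above (illustrative numbers,
 * not from the spec): with an rpipe yielding a seg_size of 4096
 * bytes, a 10000 byte bulk OUT buffer becomes three segments of
 * 4096, 4096 and 1808 bytes.  Each segment gets its own wa_xfer_*
 * request header sent over the DTO endpoint, followed by its slice
 * of the data; the last request carries the 0x80 flag in
 * bTransferSegment to mark it final (see __wa_xfer_setup()).
 */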
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/scatterlist.h>

#include "wa-hc.h"
#include "wusbhc.h"

enum {
        /* [WUSB] section 8.3.3 allocates 7 bits for the segment index. */
        WA_SEGS_MAX = 128,
};

enum wa_seg_status {
        WA_SEG_NOTREADY,
        WA_SEG_READY,
        WA_SEG_DELAYED,
        WA_SEG_SUBMITTED,
        WA_SEG_PENDING,
        WA_SEG_DTI_PENDING,
        WA_SEG_DONE,
        WA_SEG_ERROR,
        WA_SEG_ABORTED,
};
static void wa_xfer_delayed_run(struct wa_rpipe *);
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting);
/*
 * Life cycle governed by 'struct urb' (the refcount of the struct is
 * that of the 'struct urb' and usb_free_urb() would free the whole
 * struct).
 */
struct wa_seg {
        struct urb tr_urb;              /* transfer request urb. */
        struct urb *isoc_pack_desc_urb; /* for isoc packet descriptor. */
        struct urb *dto_urb;            /* for data output. */
        struct list_head list_node;     /* for rpipe->req_list */
        struct wa_xfer *xfer;           /* out xfer */
        u8 index;                       /* which segment we are */
        int isoc_frame_count;   /* number of isoc frames in this segment. */
        int isoc_frame_offset;  /* starting frame offset in the xfer URB. */
        /* Isoc frame that the current transfer buffer corresponds to. */
        int isoc_frame_index;
        int isoc_size;  /* size of all isoc frames sent by this seg. */
        enum wa_seg_status status;
        ssize_t result;                 /* bytes xfered or error */
        struct wa_xfer_hdr xfer_hdr;
};
static inline void wa_seg_init(struct wa_seg *seg)
{
        usb_init_urb(&seg->tr_urb);

        /* set the remaining memory to 0. */
        memset(((void *)seg) + sizeof(seg->tr_urb), 0,
                sizeof(*seg) - sizeof(seg->tr_urb));
}
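/*
 * Note: the memset() above deliberately skips the embedded tr_urb.
 * usb_init_urb() has just initialized it (including its kref), so
 * zeroing it afterwards would wreck the URB's reference count.
 */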
/*
 * Protected by xfer->lock
 *
 */
struct wa_xfer {
        struct kref refcnt;
        struct list_head list_node;
        spinlock_t lock;
        u32 id;

        struct wahc *wa;                /* Wire adapter we are plugged to */
        struct usb_host_endpoint *ep;
        struct urb *urb;                /* URB we are transferring for */
        struct wa_seg **seg;            /* transfer segments */
        u8 segs, segs_submitted, segs_done;
        unsigned is_inbound:1;
        unsigned is_dma:1;
        size_t seg_size;
        int result;

        gfp_t gfp;                      /* allocation mask */

        struct wusb_dev *wusb_dev;      /* for activity timestamps */
};
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
        struct wa_seg *seg, int curr_iso_frame);
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
        int starting_index, enum wa_seg_status status);
static inline void wa_xfer_init(struct wa_xfer *xfer)
{
        kref_init(&xfer->refcnt);
        INIT_LIST_HEAD(&xfer->list_node);
        spin_lock_init(&xfer->lock);
}
/*
 * Destroy a transfer structure
 *
 * Note that freeing xfer->seg[cnt]->tr_urb will free the containing
 * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
 */
static void wa_xfer_destroy(struct kref *_xfer)
{
        struct wa_xfer *xfer = container_of(_xfer, struct wa_xfer, refcnt);
        if (xfer->seg) {
                unsigned cnt;
                for (cnt = 0; cnt < xfer->segs; cnt++) {
                        struct wa_seg *seg = xfer->seg[cnt];
                        if (seg) {
                                usb_free_urb(seg->isoc_pack_desc_urb);
                                if (seg->dto_urb) {
                                        kfree(seg->dto_urb->sg);
                                        usb_free_urb(seg->dto_urb);
                                }
                                usb_free_urb(&seg->tr_urb);
                        }
                }
                kfree(xfer->seg);
        }
        kfree(xfer);
}
static void wa_xfer_get(struct wa_xfer *xfer)
{
        kref_get(&xfer->refcnt);
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
        kref_put(&xfer->refcnt, wa_xfer_destroy);
}
/*
 * Try to get exclusive access to the DTO endpoint resource. Return true
 * if successful.
 */
static inline int __wa_dto_try_get(struct wahc *wa)
{
        return (test_and_set_bit(0, &wa->dto_in_use) == 0);
}

/* Release the DTO endpoint resource. */
static inline void __wa_dto_put(struct wahc *wa)
{
        clear_bit_unlock(0, &wa->dto_in_use);
}
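/*
 * Usage discipline for the DTO resource, as assumed from the callers
 * in this file: acquire with __wa_dto_try_get(wa) before touching the
 * DTO endpoint; when done, call __wa_dto_put(wa) and then
 * wa_check_for_delayed_rpipes(wa) so any RPIPE parked on
 * wa->rpipe_delayed_list gets a chance to run.
 */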
/* Service RPIPEs that are waiting on the DTO resource. */
static void wa_check_for_delayed_rpipes(struct wahc *wa)
{
        unsigned long flags;
        int dto_waiting = 0;
        struct wa_rpipe *rpipe;

        spin_lock_irqsave(&wa->rpipe_lock, flags);
        while (!list_empty(&wa->rpipe_delayed_list) && !dto_waiting) {
                rpipe = list_first_entry(&wa->rpipe_delayed_list,
                                struct wa_rpipe, list_node);
                __wa_xfer_delayed_run(rpipe, &dto_waiting);
                /* remove this RPIPE from the list if it is not waiting. */
                if (!dto_waiting) {
                        pr_debug("%s: RPIPE %d serviced and removed from delayed list.\n",
                                __func__,
                                le16_to_cpu(rpipe->descr.wRPipeIndex));
                        list_del_init(&rpipe->list_node);
                }
        }
        spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}
/* add this RPIPE to the end of the delayed RPIPE list. */
static void wa_add_delayed_rpipe(struct wahc *wa, struct wa_rpipe *rpipe)
{
        unsigned long flags;

        spin_lock_irqsave(&wa->rpipe_lock, flags);
        /* add rpipe to the list if it is not already on it. */
        if (list_empty(&rpipe->list_node)) {
                pr_debug("%s: adding RPIPE %d to the delayed list.\n",
                        __func__, le16_to_cpu(rpipe->descr.wRPipeIndex));
                list_add_tail(&rpipe->list_node, &wa->rpipe_delayed_list);
        }
        spin_unlock_irqrestore(&wa->rpipe_lock, flags);
}
/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 *
 * We take xfer->lock for setting the result; this is a barrier
 * against drivers/usb/core/hcd.c:unlink1() being called after we call
 * usb_hcd_giveback_urb() and wa_urb_dequeue() trying to get a
 * reference to the transfer.
 */
static void wa_xfer_giveback(struct wa_xfer *xfer)
{
        unsigned long flags;

        spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags);
        list_del_init(&xfer->list_node);
        usb_hcd_unlink_urb_from_ep(&(xfer->wa->wusb->usb_hcd), xfer->urb);
        spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
        /* FIXME: segmentation broken -- kills DWA */
        wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result);
        wa_put(xfer->wa);
        wa_xfer_put(xfer);
}
/*
 * xfer is referenced
 *
 * xfer->lock has to be unlocked
 */
static void wa_xfer_completion(struct wa_xfer *xfer)
{
        if (xfer->wusb_dev)
                wusb_dev_put(xfer->wusb_dev);
        rpipe_put(xfer->ep->hcpriv);
        wa_xfer_giveback(xfer);
}
/*
 * Initialize a transfer's ID
 *
 * We need to use a sequential number; if we use the pointer or the
 * hash of the pointer, it can repeat over sequential transfers and
 * then it will confuse the HWA....wonder why in hell they put a 32
 * bit handle in there then.
 */
static void wa_xfer_id_init(struct wa_xfer *xfer)
{
        xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
}
/* Return the xfer's ID. */
static inline u32 wa_xfer_id(struct wa_xfer *xfer)
{
        return xfer->id;
}

/* Return the xfer's ID in transport format (little endian). */
static inline __le32 wa_xfer_id_le32(struct wa_xfer *xfer)
{
        return cpu_to_le32(xfer->id);
}
/*
 * If transfer is done, wrap it up and return true
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)
{
        struct device *dev = &xfer->wa->usb_iface->dev;
        unsigned result, cnt;
        struct wa_seg *seg;
        struct urb *urb = xfer->urb;
        unsigned found_short = 0;

        result = xfer->segs_done == xfer->segs_submitted;
        if (result == 0)
                goto out;
        urb->actual_length = 0;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                seg = xfer->seg[cnt];
                switch (seg->status) {
                case WA_SEG_DONE:
                        if (found_short && seg->result > 0) {
                                dev_dbg(dev, "xfer %p ID %08X#%u: bad short segments (%zu)\n",
                                        xfer, wa_xfer_id(xfer), cnt,
                                        seg->result);
                                urb->status = -EINVAL;
                                goto out;
                        }
                        urb->actual_length += seg->result;
                        if (!(usb_pipeisoc(xfer->urb->pipe))
                                && seg->result < xfer->seg_size
                                && cnt != xfer->segs-1)
                                found_short = 1;
                        dev_dbg(dev, "xfer %p ID %08X#%u: DONE short %d "
                                "result %zu urb->actual_length %d\n",
                                xfer, wa_xfer_id(xfer), seg->index, found_short,
                                seg->result, urb->actual_length);
                        break;
                case WA_SEG_ERROR:
                        xfer->result = seg->result;
                        dev_dbg(dev, "xfer %p ID %08X#%u: ERROR result %zi(0x%08zX)\n",
                                xfer, wa_xfer_id(xfer), seg->index, seg->result,
                                seg->result);
                        goto out;
                case WA_SEG_ABORTED:
                        xfer->result = seg->result;
                        dev_dbg(dev, "xfer %p ID %08X#%u: ABORTED result %zi(0x%08zX)\n",
                                xfer, wa_xfer_id(xfer), seg->index, seg->result,
                                seg->result);
                        goto out;
                default:
                        dev_warn(dev, "xfer %p ID %08X#%u: is_done bad state %d\n",
                                 xfer, wa_xfer_id(xfer), cnt, seg->status);
                        xfer->result = -EINVAL;
                        goto out;
                }
        }
        xfer->result = 0;
out:
        return result;
}
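/*
 * Note on the found_short logic above: only the last segment of a
 * non-isoc transfer may legitimately be short.  Once a short segment
 * has been seen, a later segment that still returned data means the
 * device produced results out of order, and the URB is failed with
 * -EINVAL.
 */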
/*
 * Mark the given segment as done.  Return true if this completes the xfer.
 * This should only be called for segs that have been submitted to an RPIPE.
 * Delayed segs are not marked as submitted so they do not need to be marked
 * as done when cleaning up.
 *
 * xfer->lock has to be locked
 */
static unsigned __wa_xfer_mark_seg_as_done(struct wa_xfer *xfer,
        struct wa_seg *seg, enum wa_seg_status status)
{
        seg->status = status;
        xfer->segs_done++;

        /* check for done. */
        return __wa_xfer_is_done(xfer);
}
/*
 * Search for a transfer list ID on the HCD's URB list
 *
 * For 32 bit architectures, we use the pointer itself; for 64 bits, a
 * 32-bit hash of the pointer.
 *
 * @returns NULL if not found.
 */
static struct wa_xfer *wa_xfer_get_by_id(struct wahc *wa, u32 id)
{
        unsigned long flags;
        struct wa_xfer *xfer_itr;
        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_for_each_entry(xfer_itr, &wa->xfer_list, list_node) {
                if (id == xfer_itr->id) {
                        wa_xfer_get(xfer_itr);
                        goto out;
                }
        }
        xfer_itr = NULL;
out:
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
        return xfer_itr;
}
struct wa_xfer_abort_buffer {
        struct urb urb;
        struct wahc *wa;
        struct wa_xfer_abort cmd;
};
static void __wa_xfer_abort_cb(struct urb *urb)
{
        struct wa_xfer_abort_buffer *b = urb->context;
        struct wahc *wa = b->wa;

        /*
         * If the abort request URB failed, then the HWA did not get the abort
         * command.  Forcibly clean up the xfer without waiting for a Transfer
         * Result from the HWA.
         */
        if (urb->status < 0) {
                struct wa_xfer *xfer;
                struct device *dev = &wa->usb_iface->dev;

                xfer = wa_xfer_get_by_id(wa, le32_to_cpu(b->cmd.dwTransferID));
                dev_err(dev, "%s: Transfer Abort request failed. result: %d\n",
                        __func__, urb->status);
                if (xfer) {
                        unsigned long flags;
                        int done, seg_index = 0;
                        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

                        dev_err(dev, "%s: cleaning up xfer %p ID 0x%08X.\n",
                                __func__, xfer, wa_xfer_id(xfer));
                        spin_lock_irqsave(&xfer->lock, flags);
                        /* skip done segs. */
                        while (seg_index < xfer->segs) {
                                struct wa_seg *seg = xfer->seg[seg_index];

                                if ((seg->status == WA_SEG_DONE) ||
                                        (seg->status == WA_SEG_ERROR)) {
                                        ++seg_index;
                                } else {
                                        break;
                                }
                        }
                        /* mark remaining segs as aborted. */
                        wa_complete_remaining_xfer_segs(xfer, seg_index,
                                WA_SEG_ABORTED);
                        done = __wa_xfer_is_done(xfer);
                        spin_unlock_irqrestore(&xfer->lock, flags);
                        if (done)
                                wa_xfer_completion(xfer);
                        wa_xfer_delayed_run(rpipe);
                        wa_xfer_put(xfer);
                } else {
                        dev_err(dev, "%s: xfer ID 0x%08X already gone.\n",
                                 __func__, le32_to_cpu(b->cmd.dwTransferID));
                }
        }

        wa_put(wa);     /* taken in __wa_xfer_abort */
        usb_put_urb(&b->urb);
}
/*
 * Aborts an ongoing transaction
 *
 * Assumes the transfer is referenced and locked and in a submitted
 * state (mainly that there is an endpoint/rpipe assigned).
 *
 * The callback (see above) does nothing but freeing up the data by
 * putting the URB. Because the URB is allocated at the head of the
 * struct, the whole space we allocated is kfree'd.
 */
static int __wa_xfer_abort(struct wa_xfer *xfer)
{
        int result = -ENOMEM;
        struct device *dev = &xfer->wa->usb_iface->dev;
        struct wa_xfer_abort_buffer *b;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        b = kmalloc(sizeof(*b), GFP_ATOMIC);
        if (b == NULL)
                goto error_kmalloc;
        b->cmd.bLength = sizeof(b->cmd);
        b->cmd.bRequestType = WA_XFER_ABORT;
        b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
        b->cmd.dwTransferID = wa_xfer_id_le32(xfer);
        b->wa = wa_get(xfer->wa);

        usb_init_urb(&b->urb);
        usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
                usb_sndbulkpipe(xfer->wa->usb_dev,
                                xfer->wa->dto_epd->bEndpointAddress),
                &b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
        result = usb_submit_urb(&b->urb, GFP_ATOMIC);
        if (result < 0)
                goto error_submit;
        return result;                          /* callback frees! */

error_submit:
        wa_put(xfer->wa);
        usb_put_urb(&b->urb);
        if (printk_ratelimit())
                dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
                        xfer, result);
error_kmalloc:
        return result;
}
/*
 * Calculate the number of isoc frames starting from isoc_frame_offset
 * that will fit in a transfer segment.
 */
static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
        int isoc_frame_offset, int *total_size)
{
        int segment_size = 0, frame_count = 0;
        int index = isoc_frame_offset;
        struct usb_iso_packet_descriptor *iso_frame_desc =
                xfer->urb->iso_frame_desc;

        while ((index < xfer->urb->number_of_packets)
                && ((segment_size + iso_frame_desc[index].length)
                                <= xfer->seg_size)) {
                /*
                 * For Alereon HWA devices, only include an isoc frame in an
                 * out segment if it is physically contiguous with the previous
                 * frame.  This is required because those devices expect
                 * the isoc frames to be sent as a single USB transaction as
                 * opposed to one transaction per frame with standard HWA.
                 */
                if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
                        && (xfer->is_inbound == 0)
                        && (index > isoc_frame_offset)
                        && ((iso_frame_desc[index - 1].offset +
                                iso_frame_desc[index - 1].length) !=
                                iso_frame_desc[index].offset))
                        break;

                /* this frame fits. count it. */
                ++frame_count;
                segment_size += iso_frame_desc[index].length;

                /* move to the next isoc frame. */
                ++index;
        }

        *total_size = segment_size;
        return frame_count;
}
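/*
 * Example (illustrative numbers): with seg_size = 3584 and isoc
 * frames of 512 bytes each, up to 7 frames fit in one segment.  For
 * an OUT transfer on an Alereon HWA, a gap between frame buffers
 * (offset[n-1] + length[n-1] != offset[n]) ends the segment early,
 * since those devices need the segment to go out as one contiguous
 * USB transaction.
 */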
/*
 * @returns < 0 on error, transfer segment request size if ok
 */
static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
                                     enum wa_xfer_type *pxfer_type)
{
        ssize_t result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        size_t maxpktsize;
        struct urb *urb = xfer->urb;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;

        switch (rpipe->descr.bmAttribute & 0x3) {
        case USB_ENDPOINT_XFER_CONTROL:
                *pxfer_type = WA_XFER_TYPE_CTL;
                result = sizeof(struct wa_xfer_ctl);
                break;
        case USB_ENDPOINT_XFER_INT:
        case USB_ENDPOINT_XFER_BULK:
                *pxfer_type = WA_XFER_TYPE_BI;
                result = sizeof(struct wa_xfer_bi);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                *pxfer_type = WA_XFER_TYPE_ISO;
                result = sizeof(struct wa_xfer_hwaiso);
                break;
        default:
                /* never happens */
                BUG();
                result = -EINVAL;       /* shut gcc up */
        }
        xfer->is_inbound = urb->pipe & USB_DIR_IN ? 1 : 0;
        xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;

        maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
        xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
                * 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
        /* Compute the segment size and make sure it is a multiple of
         * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
         * a check (FIXME) */
        if (xfer->seg_size < maxpktsize) {
                dev_err(dev,
                        "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
                        xfer->seg_size, maxpktsize);
                result = -EINVAL;
                goto error;
        }
        xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
        if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
                int index = 0;

                xfer->segs = 0;
                /*
                 * loop over urb->number_of_packets to determine how many
                 * xfer segments will be needed to send the isoc frames.
                 */
                while (index < urb->number_of_packets) {
                        int seg_size; /* don't care. */
                        index += __wa_seg_calculate_isoc_frame_count(xfer,
                                        index, &seg_size);
                        ++xfer->segs;
                }
        } else {
                xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
                                                xfer->seg_size);
                if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
                        xfer->segs = 1;
        }

        if (xfer->segs > WA_SEGS_MAX) {
                dev_err(dev, "BUG? oops, number of segments %zu bigger than %d\n",
                        (urb->transfer_buffer_length/xfer->seg_size),
                        WA_SEGS_MAX);
                result = -EINVAL;
                goto error;
        }
error:
        return result;
}
static void __wa_setup_isoc_packet_descr(
                struct wa_xfer_packet_info_hwaiso *packet_desc,
                struct wa_xfer *xfer,
                struct wa_seg *seg) {
        struct usb_iso_packet_descriptor *iso_frame_desc =
                xfer->urb->iso_frame_desc;
        int frame_index;

        /* populate isoc packet descriptor. */
        packet_desc->bPacketType = WA_XFER_ISO_PACKET_INFO;
        packet_desc->wLength = cpu_to_le16(sizeof(*packet_desc) +
                (sizeof(packet_desc->PacketLength[0]) *
                        seg->isoc_frame_count));
        for (frame_index = 0; frame_index < seg->isoc_frame_count;
                ++frame_index) {
                int offset_index = frame_index + seg->isoc_frame_offset;
                packet_desc->PacketLength[frame_index] =
                        cpu_to_le16(iso_frame_desc[offset_index].length);
        }
}
/* Fill in the common request header and xfer-type specific data. */
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
                                 struct wa_xfer_hdr *xfer_hdr0,
                                 enum wa_xfer_type xfer_type,
                                 size_t xfer_hdr_size)
{
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
        struct wa_seg *seg = xfer->seg[0];

        xfer_hdr0 = &seg->xfer_hdr;
        xfer_hdr0->bLength = xfer_hdr_size;
        xfer_hdr0->bRequestType = xfer_type;
        xfer_hdr0->wRPipe = rpipe->descr.wRPipeIndex;
        xfer_hdr0->dwTransferID = wa_xfer_id_le32(xfer);
        xfer_hdr0->bTransferSegment = 0;
        switch (xfer_type) {
        case WA_XFER_TYPE_CTL: {
                struct wa_xfer_ctl *xfer_ctl =
                        container_of(xfer_hdr0, struct wa_xfer_ctl, hdr);
                xfer_ctl->bmAttribute = xfer->is_inbound ? 1 : 0;
                memcpy(&xfer_ctl->baSetupData, xfer->urb->setup_packet,
                        sizeof(xfer_ctl->baSetupData));
                break;
        }
        case WA_XFER_TYPE_BI:
                break;
        case WA_XFER_TYPE_ISO: {
                struct wa_xfer_hwaiso *xfer_iso =
                        container_of(xfer_hdr0, struct wa_xfer_hwaiso, hdr);
                struct wa_xfer_packet_info_hwaiso *packet_desc =
                        ((void *)xfer_iso) + xfer_hdr_size;

                /* populate the isoc section of the transfer request. */
                xfer_iso->dwNumOfPackets = cpu_to_le32(seg->isoc_frame_count);
                /* populate isoc packet descriptor. */
                __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
                break;
        }
        default:
                BUG();
        }
}
/*
 * Callback for the OUT data phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this DTO phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_dto_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready = 0;
        int data_send_done = 1, release_dto = 0, holding_dto = 0;
        u8 done = 0;
        int result;

        /* free the sg if it was used. */
        kfree(urb->sg);
        urb->sg = NULL;

        spin_lock_irqsave(&xfer->lock, flags);
        wa = xfer->wa;
        dev = &wa->usb_iface->dev;
        if (usb_pipeisoc(xfer->urb->pipe)) {
                /* Alereon HWA sends all isoc frames in a single transfer. */
                if (wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
                        seg->isoc_frame_index += seg->isoc_frame_count;
                else
                        seg->isoc_frame_index += 1;
                if (seg->isoc_frame_index < seg->isoc_frame_count) {
                        data_send_done = 0;
                        holding_dto = 1; /* checked in error cases. */
                        /*
                         * if this is the last isoc frame of the segment, we
                         * can release DTO after sending this frame.
                         */
                        if ((seg->isoc_frame_index + 1) >=
                                seg->isoc_frame_count)
                                release_dto = 1;
                }
                dev_dbg(dev, "xfer 0x%08X#%u: isoc frame = %d, holding_dto = %d, release_dto = %d.\n",
                        wa_xfer_id(xfer), seg->index, seg->isoc_frame_index,
                        holding_dto, release_dto);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                seg->result += urb->actual_length;
                if (data_send_done) {
                        dev_dbg(dev, "xfer 0x%08X#%u: data out done (%zu bytes)\n",
                                wa_xfer_id(xfer), seg->index, seg->result);
                        if (seg->status < WA_SEG_PENDING)
                                seg->status = WA_SEG_PENDING;
                } else {
                        /* should only hit this for isoc xfers. */
                        /*
                         * Populate the dto URB with the next isoc frame buffer,
                         * send the URB and release DTO if we no longer need it.
                         */
                        __wa_populate_dto_urb_isoc(xfer, seg,
                                seg->isoc_frame_offset + seg->isoc_frame_index);

                        /* resubmit the URB with the next isoc frame. */
                        /* take a ref on resubmit. */
                        wa_xfer_get(xfer);
                        result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                        if (result < 0) {
                                dev_err(dev, "xfer 0x%08X#%u: DTO submit failed: %d\n",
                                        wa_xfer_id(xfer), seg->index, result);
                                spin_unlock_irqrestore(&xfer->lock, flags);
                                goto error_dto_submit;
                        }
                }
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (release_dto) {
                        __wa_dto_put(wa);
                        wa_check_for_delayed_rpipes(wa);
                }
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the who unlinked us */
                if (holding_dto) {
                        __wa_dto_put(wa);
                        wa_check_for_delayed_rpipes(wa);
                }
                break;
        default:                /* Other errors ... */
                dev_err(dev, "xfer 0x%08X#%u: data out error %d\n",
                        wa_xfer_id(xfer), seg->index, urb->status);
                goto error_default;
        }

        /* taken when this URB was submitted. */
        wa_xfer_put(xfer);
        return;

error_dto_submit:
        /* taken on resubmit attempt. */
        wa_xfer_put(xfer);
error_default:
        spin_lock_irqsave(&xfer->lock, flags);
        rpipe = xfer->ep->hcpriv;
        if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                    EDC_ERROR_TIMEFRAME)){
                dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
                wa_reset_all(wa);
        }
        if (seg->status != WA_SEG_ERROR) {
                seg->result = urb->status;
                __wa_xfer_abort(xfer);
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
        }
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (holding_dto) {
                __wa_dto_put(wa);
                wa_check_for_delayed_rpipes(wa);
        }
        if (done)
                wa_xfer_completion(xfer);
        if (rpipe_ready)
                wa_xfer_delayed_run(rpipe);
        /* taken when this URB was submitted. */
        wa_xfer_put(xfer);
}
/*
 * Callback for the isoc packet descriptor phase of the segment request
 *
 * Check wa_seg_tr_cb(); most comments also apply here because this
 * function does almost the same thing and they work closely
 * together.
 *
 * If the seg request has failed but this phase has succeeded,
 * wa_seg_tr_cb() has already failed the segment and moved the
 * status to WA_SEG_ERROR, so this will go through 'case 0' and
 * effectively do nothing.
 */
static void wa_seg_iso_pack_desc_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready = 0;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "iso xfer %08X#%u: packet descriptor done\n",
                        wa_xfer_id(xfer), seg->index);
                if (xfer->is_inbound && seg->status < WA_SEG_PENDING)
                        seg->status = WA_SEG_PENDING;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                pr_err_ratelimited("iso xfer %08X#%u: packet descriptor error %d\n",
                                wa_xfer_id(xfer), seg->index, urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)){
                        dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                if (seg->status != WA_SEG_ERROR) {
                        usb_unlink_urb(seg->dto_urb);
                        seg->result = urb->status;
                        __wa_xfer_abort(xfer);
                        rpipe_ready = rpipe_avail_inc(rpipe);
                        done = __wa_xfer_mark_seg_as_done(xfer, seg,
                                        WA_SEG_ERROR);
                }
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
        /* taken when this URB was submitted. */
        wa_xfer_put(xfer);
}
/*
 * Callback for the segment request
 *
 * If successful transition state (unless already transitioned or
 * outbound transfer); otherwise, take a note of the error, mark this
 * segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 *
 * We have to check before setting the status to WA_SEG_PENDING
 * because sometimes the xfer result callback arrives before this
 * callback (geeeeeeze), so it might happen that we are already in
 * another state. As well, we don't set it if the transfer is not inbound,
 * as in that case, wa_seg_dto_cb will do it when the OUT data phase
 * finishes.
 */
static void wa_seg_tr_cb(struct urb *urb)
{
        struct wa_seg *seg = urb->context;
        struct wa_xfer *xfer = seg->xfer;
        struct wahc *wa;
        struct device *dev;
        struct wa_rpipe *rpipe;
        unsigned long flags;
        unsigned rpipe_ready;
        u8 done = 0;

        switch (urb->status) {
        case 0:
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                dev_dbg(dev, "xfer %p ID 0x%08X#%u: request done\n",
                        xfer, wa_xfer_id(xfer), seg->index);
                if (xfer->is_inbound &&
                        seg->status < WA_SEG_PENDING &&
                        !(usb_pipeisoc(xfer->urb->pipe)))
                        seg->status = WA_SEG_PENDING;
                spin_unlock_irqrestore(&xfer->lock, flags);
                break;
        case -ECONNRESET:       /* URB unlinked; no need to do anything */
        case -ENOENT:           /* as it was done by the who unlinked us */
                break;
        default:                /* Other errors ... */
                spin_lock_irqsave(&xfer->lock, flags);
                wa = xfer->wa;
                dev = &wa->usb_iface->dev;
                rpipe = xfer->ep->hcpriv;
                if (printk_ratelimit())
                        dev_err(dev, "xfer %p ID 0x%08X#%u: request error %d\n",
                                xfer, wa_xfer_id(xfer), seg->index,
                                urb->status);
                if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
                            EDC_ERROR_TIMEFRAME)){
                        dev_err(dev, "DTO: URB max acceptable errors "
                                "exceeded, resetting device\n");
                        wa_reset_all(wa);
                }
                usb_unlink_urb(seg->isoc_pack_desc_urb);
                usb_unlink_urb(seg->dto_urb);
                seg->result = urb->status;
                __wa_xfer_abort(xfer);
                rpipe_ready = rpipe_avail_inc(rpipe);
                done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_ERROR);
                spin_unlock_irqrestore(&xfer->lock, flags);
                if (done)
                        wa_xfer_completion(xfer);
                if (rpipe_ready)
                        wa_xfer_delayed_run(rpipe);
        }
        /* taken when this URB was submitted. */
        wa_xfer_put(xfer);
}
/*
 * Allocate an SG list to store bytes_to_transfer bytes and copy the
 * subset of the in_sg that matches the buffer subset
 * we are about to transfer.
 */
static struct scatterlist *wa_xfer_create_subset_sg(struct scatterlist *in_sg,
        const unsigned int bytes_transferred,
        const unsigned int bytes_to_transfer, int *out_num_sgs)
{
        struct scatterlist *out_sg;
        unsigned int bytes_processed = 0, offset_into_current_page_data = 0,
                nents;
        struct scatterlist *current_xfer_sg = in_sg;
        struct scatterlist *current_seg_sg, *last_seg_sg;

        /* skip previously transferred pages. */
        while ((current_xfer_sg) &&
                        (bytes_processed < bytes_transferred)) {
                bytes_processed += current_xfer_sg->length;

                /* advance the sg if current segment starts on or past the
                        next page. */
                if (bytes_processed <= bytes_transferred)
                        current_xfer_sg = sg_next(current_xfer_sg);
        }

        /* the data for the current segment starts in current_xfer_sg.
                calculate the offset. */
        if (bytes_processed > bytes_transferred) {
                offset_into_current_page_data = current_xfer_sg->length -
                        (bytes_processed - bytes_transferred);
        }

        /* calculate the number of pages needed by this segment. */
        nents = DIV_ROUND_UP((bytes_to_transfer +
                offset_into_current_page_data +
                current_xfer_sg->offset),
                PAGE_SIZE);

        out_sg = kmalloc((sizeof(struct scatterlist) * nents), GFP_ATOMIC);
        if (out_sg) {
                sg_init_table(out_sg, nents);

                /* copy the portion of the incoming SG that correlates to the
                 * data to be transferred by this segment to the segment SG. */
                last_seg_sg = current_seg_sg = out_sg;
                bytes_processed = 0;

                /* reset nents and calculate the actual number of sg entries
                        needed. */
                nents = 0;
                while ((bytes_processed < bytes_to_transfer) &&
                                current_seg_sg && current_xfer_sg) {
                        unsigned int page_len = min((current_xfer_sg->length -
                                offset_into_current_page_data),
                                (bytes_to_transfer - bytes_processed));

                        sg_set_page(current_seg_sg, sg_page(current_xfer_sg),
                                page_len,
                                current_xfer_sg->offset +
                                offset_into_current_page_data);

                        bytes_processed += page_len;

                        last_seg_sg = current_seg_sg;
                        current_seg_sg = sg_next(current_seg_sg);
                        current_xfer_sg = sg_next(current_xfer_sg);

                        /* only the first page may require additional offset. */
                        offset_into_current_page_data = 0;
                        nents++;
                }

                /* update num_sgs and terminate the list since we may have
                 * concatenated pages. */
                sg_mark_end(last_seg_sg);
                *out_num_sgs = nents;
        }

        return out_sg;
}
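/*
 * Example (illustrative): for a segment covering bytes 3000..7095 of
 * an URB whose SG list holds entries of 2048 bytes, the skip loop
 * above passes over the first entry (2048 bytes processed), stops in
 * the second, and computes offset_into_current_page_data = 952 so the
 * subset list's first entry starts exactly at byte 3000.  Only that
 * first entry carries the extra offset.
 */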
/*
 * Populate DMA buffer info for the isoc dto urb.
 */
static void __wa_populate_dto_urb_isoc(struct wa_xfer *xfer,
        struct wa_seg *seg, int curr_iso_frame)
{
        seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
        seg->dto_urb->sg = NULL;
        seg->dto_urb->num_sgs = 0;
        /* dto urb buffer address pulled from iso_frame_desc. */
        seg->dto_urb->transfer_dma = xfer->urb->transfer_dma +
                xfer->urb->iso_frame_desc[curr_iso_frame].offset;
        /* The Alereon HWA sends a single URB with all isoc segs. */
        if (xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
                seg->dto_urb->transfer_buffer_length = seg->isoc_size;
        else
                seg->dto_urb->transfer_buffer_length =
                        xfer->urb->iso_frame_desc[curr_iso_frame].length;
}
/*
 * Populate buffer ptr and size, DMA buffer or SG list for the dto urb.
 */
static int __wa_populate_dto_urb(struct wa_xfer *xfer,
        struct wa_seg *seg, size_t buf_itr_offset, size_t buf_itr_size)
{
        int result = 0;

        if (xfer->is_dma) {
                seg->dto_urb->transfer_dma =
                        xfer->urb->transfer_dma + buf_itr_offset;
                seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
                seg->dto_urb->sg = NULL;
                seg->dto_urb->num_sgs = 0;
        } else {
                /* do buffer or SG processing. */
                seg->dto_urb->transfer_flags &=
                        ~URB_NO_TRANSFER_DMA_MAP;
                /* this should always be 0 before a resubmit. */
                seg->dto_urb->num_mapped_sgs = 0;

                if (xfer->urb->transfer_buffer) {
                        seg->dto_urb->transfer_buffer =
                                xfer->urb->transfer_buffer +
                                buf_itr_offset;
                        seg->dto_urb->sg = NULL;
                        seg->dto_urb->num_sgs = 0;
                } else {
                        seg->dto_urb->transfer_buffer = NULL;

                        /*
                         * allocate an SG list to store seg_size bytes
                         * and copy the subset of the xfer->urb->sg that
                         * matches the buffer subset we are about to
                         * read.
                         */
                        seg->dto_urb->sg = wa_xfer_create_subset_sg(
                                xfer->urb->sg,
                                buf_itr_offset, buf_itr_size,
                                &(seg->dto_urb->num_sgs));
                        if (!(seg->dto_urb->sg))
                                result = -ENOMEM;
                }
        }
        seg->dto_urb->transfer_buffer_length = buf_itr_size;

        return result;
}
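/*
 * Three buffer flavors are handled above, mirroring what the USB
 * stack can hand us: an already-mapped DMA address
 * (URB_NO_TRANSFER_DMA_MAP), a plain linear transfer_buffer, or an SG
 * list that must be trimmed to this segment's window with
 * wa_xfer_create_subset_sg().
 */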
/*
 * Allocate the segs array and initialize each of them
 *
 * The segments are freed by wa_xfer_destroy() when the xfer use count
 * drops to zero; however, because each segment is given the same life
 * cycle as the USB URB it contains, it is actually freed by
 * usb_put_urb() on the contained USB URB (twisted, eh?).
 */
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
{
        int result, cnt, isoc_frame_offset = 0;
        size_t alloc_size = sizeof(*xfer->seg[0])
                - sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
        struct usb_device *usb_dev = xfer->wa->usb_dev;
        const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
        struct wa_seg *seg;
        size_t buf_itr, buf_size, buf_itr_size;

        result = -ENOMEM;
        xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
        if (xfer->seg == NULL)
                goto error_segs_kzalloc;
        buf_itr = 0;
        buf_size = xfer->urb->transfer_buffer_length;
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                size_t iso_pkt_descr_size = 0;
                int seg_isoc_frame_count = 0, seg_isoc_size = 0;

                /*
                 * Adjust the size of the segment object to contain space for
                 * the isoc packet descriptor buffer.
                 */
                if (usb_pipeisoc(xfer->urb->pipe)) {
                        seg_isoc_frame_count =
                                __wa_seg_calculate_isoc_frame_count(xfer,
                                        isoc_frame_offset, &seg_isoc_size);

                        iso_pkt_descr_size =
                                sizeof(struct wa_xfer_packet_info_hwaiso) +
                                (seg_isoc_frame_count * sizeof(__le16));
                }
                seg = xfer->seg[cnt] = kmalloc(alloc_size + iso_pkt_descr_size,
                                                GFP_ATOMIC);
                if (seg == NULL)
                        goto error_seg_kmalloc;
                wa_seg_init(seg);
                seg->xfer = xfer;
                seg->index = cnt;
                usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
                                  usb_sndbulkpipe(usb_dev,
                                                  dto_epd->bEndpointAddress),
                                  &seg->xfer_hdr, xfer_hdr_size,
                                  wa_seg_tr_cb, seg);
                buf_itr_size = min(buf_size, xfer->seg_size);

                if (usb_pipeisoc(xfer->urb->pipe)) {
                        seg->isoc_frame_count = seg_isoc_frame_count;
                        seg->isoc_frame_offset = isoc_frame_offset;
                        seg->isoc_size = seg_isoc_size;
                        /* iso packet descriptor. */
                        seg->isoc_pack_desc_urb =
                                        usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->isoc_pack_desc_urb == NULL)
                                goto error_iso_pack_desc_alloc;
                        /*
                         * The buffer for the isoc packet descriptor starts
                         * after the transfer request header in the
                         * segment object memory buffer.
                         */
                        usb_fill_bulk_urb(
                                seg->isoc_pack_desc_urb, usb_dev,
                                usb_sndbulkpipe(usb_dev,
                                        dto_epd->bEndpointAddress),
                                (void *)(&seg->xfer_hdr) +
                                        xfer_hdr_size,
                                iso_pkt_descr_size,
                                wa_seg_iso_pack_desc_cb, seg);

                        /* adjust starting frame offset for next seg. */
                        isoc_frame_offset += seg_isoc_frame_count;
                }

                if (xfer->is_inbound == 0 && buf_size > 0) {
                        /* outbound data. */
                        seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
                        if (seg->dto_urb == NULL)
                                goto error_dto_alloc;
                        usb_fill_bulk_urb(
                                seg->dto_urb, usb_dev,
                                usb_sndbulkpipe(usb_dev,
                                        dto_epd->bEndpointAddress),
                                NULL, 0, wa_seg_dto_cb, seg);

                        if (usb_pipeisoc(xfer->urb->pipe)) {
                                /*
                                 * Fill in the xfer buffer information for the
                                 * first isoc frame.  Subsequent frames in this
                                 * segment will be filled in and sent from the
                                 * DTO completion routine, if needed.
                                 */
                                __wa_populate_dto_urb_isoc(xfer, seg,
                                        seg->isoc_frame_offset);
                        } else {
                                /* fill in the xfer buffer information. */
                                result = __wa_populate_dto_urb(xfer, seg,
                                                        buf_itr, buf_itr_size);
                                if (result < 0)
                                        goto error_seg_outbound_populate;

                                buf_itr += buf_itr_size;
                                buf_size -= buf_itr_size;
                        }
                }
                seg->status = WA_SEG_READY;
        }
        return 0;

        /*
         * Free the memory for the current segment which failed to init.
         * Use the fact that cnt is left where it failed.  The remaining
         * segments will be cleaned up by wa_xfer_destroy.
         */
error_seg_outbound_populate:
        usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
        usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
error_iso_pack_desc_alloc:
        kfree(xfer->seg[cnt]);
        xfer->seg[cnt] = NULL;
error_seg_kmalloc:
error_segs_kzalloc:
        return result;
}
/*
 * Allocates all the stuff needed to submit a transfer
 *
 * Breaks the whole data buffer in a list of segments, each one has a
 * structure allocated to it and linked in xfer->seg[index]
 *
 * FIXME: merge setup_segs() and the last part of this function, no
 *        need to do two for loops when we could run everything in a
 *        single one
 */
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)
{
        int result;
        struct device *dev = &xfer->wa->usb_iface->dev;
        enum wa_xfer_type xfer_type = 0; /* shut up GCC */
        size_t xfer_hdr_size, cnt, transfer_size;
        struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr;

        result = __wa_xfer_setup_sizes(xfer, &xfer_type);
        if (result < 0)
                goto error_setup_sizes;
        xfer_hdr_size = result;
        result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);
        if (result < 0) {
                dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
                        xfer, xfer->segs, result);
                goto error_setup_segs;
        }
        /* Fill the first header */
        xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
        wa_xfer_id_init(xfer);
        __wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

        /* Fill remaining headers */
        xfer_hdr = xfer_hdr0;
        if (xfer_type == WA_XFER_TYPE_ISO) {
                xfer_hdr0->dwTransferLength =
                        cpu_to_le32(xfer->seg[0]->isoc_size);
                for (cnt = 1; cnt < xfer->segs; cnt++) {
                        struct wa_xfer_packet_info_hwaiso *packet_desc;
                        struct wa_seg *seg = xfer->seg[cnt];
                        struct wa_xfer_hwaiso *xfer_iso;

                        xfer_hdr = &seg->xfer_hdr;
                        xfer_iso = container_of(xfer_hdr,
                                                struct wa_xfer_hwaiso, hdr);
                        packet_desc = ((void *)xfer_hdr) + xfer_hdr_size;
                        /*
                         * Copy values from the 0th header. Segment specific
                         * values are set below.
                         */
                        memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
                        xfer_hdr->bTransferSegment = cnt;
                        xfer_hdr->dwTransferLength =
                                cpu_to_le32(seg->isoc_size);
                        xfer_iso->dwNumOfPackets =
                                        cpu_to_le32(seg->isoc_frame_count);
                        __wa_setup_isoc_packet_descr(packet_desc, xfer, seg);
                        seg->status = WA_SEG_READY;
                }
        } else {
                transfer_size = urb->transfer_buffer_length;
                xfer_hdr0->dwTransferLength = transfer_size > xfer->seg_size ?
                        cpu_to_le32(xfer->seg_size) :
                        cpu_to_le32(transfer_size);
                transfer_size -= xfer->seg_size;
                for (cnt = 1; cnt < xfer->segs; cnt++) {
                        xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
                        memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
                        xfer_hdr->bTransferSegment = cnt;
                        xfer_hdr->dwTransferLength =
                                transfer_size > xfer->seg_size ?
                                        cpu_to_le32(xfer->seg_size)
                                        : cpu_to_le32(transfer_size);
                        xfer->seg[cnt]->status = WA_SEG_READY;
                        transfer_size -= xfer->seg_size;
                }
        }
        xfer_hdr->bTransferSegment |= 0x80;     /* this is the last segment */
        result = 0;
error_setup_segs:
error_setup_sizes:
        return result;
}
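/*
 * Note the asymmetry above: isoc headers carry a per-segment
 * dwTransferLength (the sum of that segment's frame lengths, as
 * computed by __wa_seg_calculate_isoc_frame_count()) plus their own
 * packet descriptors, while ctl/bulk/int headers simply slice the
 * linear buffer seg_size bytes at a time.  In both cases only the
 * final header gets the 0x80 bit in bTransferSegment.
 */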
/*
 * rpipe->seg_lock is held!
 */
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
                           struct wa_seg *seg, int *dto_done)
{
        int result;

        /* default to done unless we encounter a multi-frame isoc segment. */
        *dto_done = 1;

        /*
         * Take a ref for each segment urb so the xfer cannot disappear until
         * all of the callbacks run.
         */
        wa_xfer_get(xfer);
        /* submit the transfer request. */
        seg->status = WA_SEG_SUBMITTED;
        result = usb_submit_urb(&seg->tr_urb, GFP_ATOMIC);
        if (result < 0) {
                pr_err("%s: xfer %p#%u: REQ submit failed: %d\n",
                       __func__, xfer, seg->index, result);
                wa_xfer_put(xfer);
                goto error_tr_submit;
        }
        /* submit the isoc packet descriptor if present. */
        if (seg->isoc_pack_desc_urb) {
                wa_xfer_get(xfer);
                result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
                seg->isoc_frame_index = 0;
                if (result < 0) {
                        pr_err("%s: xfer %p#%u: ISO packet descriptor submit failed: %d\n",
                               __func__, xfer, seg->index, result);
                        wa_xfer_put(xfer);
                        goto error_iso_pack_desc_submit;
                }
        }
        /* submit the out data if this is an out request. */
        if (seg->dto_urb) {
                struct wahc *wa = xfer->wa;
                wa_xfer_get(xfer);
                result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
                if (result < 0) {
                        pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
                               __func__, xfer, seg->index, result);
                        wa_xfer_put(xfer);
                        goto error_dto_submit;
                }
                /*
                 * If this segment contains more than one isoc frame, hold
                 * onto the dto resource until we send all frames.
                 * Only applies to non-Alereon devices.
                 */
                if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
                        && (seg->isoc_frame_count > 1))
                        *dto_done = 0;
        }
        rpipe_avail_dec(rpipe);
        return 0;

error_dto_submit:
        usb_unlink_urb(seg->isoc_pack_desc_urb);
error_iso_pack_desc_submit:
        usb_unlink_urb(&seg->tr_urb);
error_tr_submit:
        seg->status = WA_SEG_ERROR;
        seg->result = result;
        *dto_done = 1;
        return result;
}
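/*
 * On the reference counting above: one xfer reference is taken per
 * URB actually submitted (transfer request, isoc packet descriptor,
 * DTO data) and is dropped either by that URB's completion callback
 * or, on submit failure, right before unwinding.
 */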
/*
 * Execute more queued request segments until the maximum concurrent allowed.
 * Return true if the DTO resource was acquired and released.
 *
 * The ugly unlock/lock sequence on the error path is needed as the
 * xfer->lock normally nests the seg_lock and not vice versa.
 */
static int __wa_xfer_delayed_run(struct wa_rpipe *rpipe, int *dto_waiting)
{
        int result, dto_acquired = 0, dto_done = 0;
        struct device *dev = &rpipe->wa->usb_iface->dev;
        struct wa_seg *seg;
        struct wa_xfer *xfer;
        unsigned long flags;

        *dto_waiting = 0;

        spin_lock_irqsave(&rpipe->seg_lock, flags);
        while (atomic_read(&rpipe->segs_available) > 0
              && !list_empty(&rpipe->seg_list)
              && (dto_acquired = __wa_dto_try_get(rpipe->wa))) {
                seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
                                 list_node);
                list_del(&seg->list_node);
                xfer = seg->xfer;
                /*
                 * Get a reference to the xfer in case the callbacks for the
                 * URBs submitted by __wa_seg_submit attempt to complete
                 * the xfer before this function completes.
                 */
                wa_xfer_get(xfer);
                result = __wa_seg_submit(rpipe, xfer, seg, &dto_done);
                /* release the dto resource if this RPIPE is done with it. */
                if (dto_done)
                        __wa_dto_put(rpipe->wa);
                dev_dbg(dev, "xfer %p ID %08X#%u submitted from delayed [%d segments available] %d\n",
                        xfer, wa_xfer_id(xfer), seg->index,
                        atomic_read(&rpipe->segs_available), result);
                if (unlikely(result < 0)) {
                        int done;

                        spin_unlock_irqrestore(&rpipe->seg_lock, flags);
                        spin_lock_irqsave(&xfer->lock, flags);
                        __wa_xfer_abort(xfer);
                        /*
                         * This seg was marked as submitted when it was put on
                         * the RPIPE seg_list.  Mark it done.
                         */
                        xfer->segs_done++;
                        done = __wa_xfer_is_done(xfer);
                        spin_unlock_irqrestore(&xfer->lock, flags);
                        if (done)
                                wa_xfer_completion(xfer);
                        spin_lock_irqsave(&rpipe->seg_lock, flags);
                }
                wa_xfer_put(xfer);
        }
        /*
         * Mark this RPIPE as waiting if dto was not acquired, there are
         * delayed segs and no active transfers to wake us up later.
         */
        if (!dto_acquired && !list_empty(&rpipe->seg_list)
                && (atomic_read(&rpipe->segs_available) ==
                        le16_to_cpu(rpipe->descr.wRequests)))
                *dto_waiting = 1;

        spin_unlock_irqrestore(&rpipe->seg_lock, flags);

        return dto_done;
}
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
{
        int dto_waiting;
        int dto_done = __wa_xfer_delayed_run(rpipe, &dto_waiting);

        /*
         * If this RPIPE is waiting on the DTO resource, add it to the tail of
         * the delayed RPIPE list.
         * Otherwise, if the WA DTO resource was acquired and released by
         * __wa_xfer_delayed_run, another RPIPE may have attempted to acquire
         * DTO and failed during that time.  Check the delayed list and process
         * any waiters.  Start searching from the next RPIPE index.
         */
        if (dto_waiting)
                wa_add_delayed_rpipe(rpipe->wa, rpipe);
        else if (dto_done)
                wa_check_for_delayed_rpipes(rpipe->wa);
}
/*
 * xfer->lock is taken
 *
 * On failure submitting we just stop submitting and return error;
 * wa_urb_enqueue_b() will execute the completion path
 */
static int __wa_xfer_submit(struct wa_xfer *xfer)
{
        int result, dto_acquired = 0, dto_done = 0, dto_waiting = 0;
        struct wahc *wa = xfer->wa;
        struct device *dev = &wa->usb_iface->dev;
        unsigned cnt;
        struct wa_seg *seg;
        unsigned long flags;
        struct wa_rpipe *rpipe = xfer->ep->hcpriv;
        size_t maxrequests = le16_to_cpu(rpipe->descr.wRequests);
        u8 available;
        u8 empty;

        spin_lock_irqsave(&wa->xfer_list_lock, flags);
        list_add_tail(&xfer->list_node, &wa->xfer_list);
        spin_unlock_irqrestore(&wa->xfer_list_lock, flags);

        BUG_ON(atomic_read(&rpipe->segs_available) > maxrequests);
        result = 0;
        spin_lock_irqsave(&rpipe->seg_lock, flags);
        for (cnt = 0; cnt < xfer->segs; cnt++) {
                int delay_seg = 1;

                available = atomic_read(&rpipe->segs_available);
                empty = list_empty(&rpipe->seg_list);
                seg = xfer->seg[cnt];
                if (available && empty) {
                        /*
                         * Only attempt to acquire DTO if we have a segment
                         * to send.
                         */
                        dto_acquired = __wa_dto_try_get(rpipe->wa);
                        if (dto_acquired) {
                                delay_seg = 0;
                                result = __wa_seg_submit(rpipe, xfer, seg,
                                                        &dto_done);
                                dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u submitted\n",
                                        xfer, wa_xfer_id(xfer), cnt, available,
                                        empty);
                                if (dto_done)
                                        __wa_dto_put(rpipe->wa);

                                if (result < 0) {
                                        __wa_xfer_abort(xfer);
                                        goto error_seg_submit;
                                }
                        }
                }

                if (delay_seg) {
                        dev_dbg(dev, "xfer %p ID 0x%08X#%u: available %u empty %u delayed\n",
                                xfer, wa_xfer_id(xfer), cnt, available, empty);
                        seg->status = WA_SEG_DELAYED;
                        list_add_tail(&seg->list_node, &rpipe->seg_list);
                }
                xfer->segs_submitted++;
        }
error_seg_submit:
        /*
         * Mark this RPIPE as waiting if dto was not acquired, there are
         * delayed segs and no active transfers to wake us up later.
         */
        if (!dto_acquired && !list_empty(&rpipe->seg_list)
                && (atomic_read(&rpipe->segs_available) ==
                        le16_to_cpu(rpipe->descr.wRequests)))
                dto_waiting = 1;
        spin_unlock_irqrestore(&rpipe->seg_lock, flags);

        if (dto_waiting)
                wa_add_delayed_rpipe(rpipe->wa, rpipe);
        else if (dto_done)
                wa_check_for_delayed_rpipes(rpipe->wa);

        return result;
}
/*
 * Second part of a URB/transfer enqueuement
 *
 * Assumes this comes from wa_urb_enqueue() [maybe through
 * wa_urb_enqueue_run()]. At this point:
 *
 * xfer->wa     filled and refcounted
 * xfer->ep     filled with rpipe refcounted if
 *              delayed == 0
 * xfer->urb    filled and refcounted (this is the case when called
 *              from wa_urb_enqueue() as we come from usb_submit_urb()
 *              and when called by wa_urb_enqueue_run(), as we took an
 *              extra ref dropped by _run() after we return).
 * xfer->gfp    filled
 *
 * If we fail at __wa_xfer_submit(), then we just check if we are done
 * and if so, we run the completion procedure. However, if we are not
 * yet done, we do nothing and wait for the completion handlers from
 * the submitted URBs or from the xfer-result path to kick in. If xfer
 * result never kicks in, the xfer will timeout from the USB code and
 * dequeue() will be called.
 */
static int wa_urb_enqueue_b(struct wa_xfer *xfer)
{
        int result;
        unsigned long flags;
        struct urb *urb = xfer->urb;
        struct wahc *wa = xfer->wa;
        struct wusbhc *wusbhc = wa->wusb;
        struct wusb_dev *wusb_dev;
        unsigned done;

        result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp);
        if (result < 0) {
                pr_err("%s: error_rpipe_get\n", __func__);
                goto error_rpipe_get;
        }
        result = -ENODEV;
        /* FIXME: segmentation broken -- kills DWA */
        mutex_lock(&wusbhc->mutex);             /* get a WUSB dev */
        if (urb->dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                pr_err("%s: error usb dev gone\n", __func__);
                goto error_dev_gone;
        }
        wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, urb->dev);
        if (wusb_dev == NULL) {
                mutex_unlock(&wusbhc->mutex);
                dev_err(&(urb->dev->dev), "%s: error wusb dev gone\n",
                        __func__);
                goto error_dev_gone;
        }
        mutex_unlock(&wusbhc->mutex);

        spin_lock_irqsave(&xfer->lock, flags);
        xfer->wusb_dev = wusb_dev;
        result = urb->status;
        if (urb->status != -EINPROGRESS) {
                dev_err(&(urb->dev->dev), "%s: error_dequeued\n", __func__);
                goto error_dequeued;
        }

        result = __wa_xfer_setup(xfer, urb);
        if (result < 0) {
                dev_err(&(urb->dev->dev), "%s: error_xfer_setup\n", __func__);
                goto error_xfer_setup;
        }
        /*
         * Get a xfer reference since __wa_xfer_submit starts asynchronous
         * operations that may try to complete the xfer before this function
         * exits.
         */
        wa_xfer_get(xfer);
        result = __wa_xfer_submit(xfer);
        if (result < 0) {
                dev_err(&(urb->dev->dev), "%s: error_xfer_submit\n", __func__);
                goto error_xfer_submit;
        }
        spin_unlock_irqrestore(&xfer->lock, flags);
        wa_xfer_put(xfer);
        return 0;

        /*
         * this is basically wa_xfer_completion() broken up wa_xfer_giveback()
         * does a wa_xfer_put() that will call wa_xfer_destroy() and undo
         * setup().
         */
error_xfer_setup:
error_dequeued:
        spin_unlock_irqrestore(&xfer->lock, flags);
        /* FIXME: segmentation broken, kills DWA */
        if (wusb_dev)
                wusb_dev_put(wusb_dev);
error_dev_gone:
        rpipe_put(xfer->ep->hcpriv);
error_rpipe_get:
        xfer->result = result;
        return result;

error_xfer_submit:
        done = __wa_xfer_is_done(xfer);
        xfer->result = result;
        spin_unlock_irqrestore(&xfer->lock, flags);
        if (done)
                wa_xfer_completion(xfer);
        wa_xfer_put(xfer);
        /* return success since the completion routine will run. */
        return 0;
}
/*
 * Execute the delayed transfers in the Wire Adapter @wa
 *
 * We need to be careful here, as dequeue() could be called in the
 * middle.  That's why we do the whole thing under the
 * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
 * and then checks the list -- so as we would be acquiring in inverse
 * order, we move the delayed list to a separate list while locked and then
 * submit them without the list lock held.
 */
void wa_urb_enqueue_run(struct work_struct *ws)
{
        struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
        struct wa_xfer *xfer, *next;
        struct urb *urb;
        LIST_HEAD(tmp_list);

        /* Create a copy of the wa->xfer_delayed_list while holding the lock */
        spin_lock_irq(&wa->xfer_list_lock);
        list_cut_position(&tmp_list, &wa->xfer_delayed_list,
                        wa->xfer_delayed_list.prev);
        spin_unlock_irq(&wa->xfer_list_lock);

        /*
         * enqueue from temp list without list lock held since wa_urb_enqueue_b
         * can take xfer->lock as well as lock mutexes.
         */
        list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
                list_del_init(&xfer->list_node);

                urb = xfer->urb;
                if (wa_urb_enqueue_b(xfer) < 0)
                        wa_xfer_giveback(xfer);
                usb_put_urb(urb);       /* taken when queuing */
        }
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
/*
 * Process the errored transfers on the Wire Adapter outside of interrupt.
 */
void wa_process_errored_transfers_run(struct work_struct *ws)
{
        struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
        struct wa_xfer *xfer, *next;
        LIST_HEAD(tmp_list);

        pr_info("%s: Run delayed STALL processing.\n", __func__);

        /* Create a copy of the wa->xfer_errored_list while holding the lock */
        spin_lock_irq(&wa->xfer_list_lock);
        list_cut_position(&tmp_list, &wa->xfer_errored_list,
                        wa->xfer_errored_list.prev);
        spin_unlock_irq(&wa->xfer_list_lock);

        /*
         * run rpipe_clear_feature_stalled from temp list without list lock
         * held.
         */
        list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
                struct usb_host_endpoint *ep;
                unsigned long flags;
                struct wa_rpipe *rpipe;

                spin_lock_irqsave(&xfer->lock, flags);
                ep = xfer->ep;
                rpipe = ep->hcpriv;
                spin_unlock_irqrestore(&xfer->lock, flags);

                /* clear RPIPE feature stalled without holding a lock. */
                rpipe_clear_feature_stalled(wa, ep);

                /* complete the xfer. This removes it from the tmp list. */
                wa_xfer_completion(xfer);

                /* check for work. */
                wa_xfer_delayed_run(rpipe);
        }
}
EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
/*
 * Submit a transfer to the Wire Adapter in a delayed way
 *
 * The process of enqueuing involves possible sleeps() [see
 * enqueue_b(), for the rpipe_get() and the mutex_lock()]. If we are
 * in an atomic section, we defer the enqueue_b() call--else we call direct.
 *
 * @urb: We own a reference to it done by the HCI Linux USB stack that
 *       will be given up by calling usb_hcd_giveback_urb() or by
 *       returning error from this function -> ergo we don't have to
 *       refcount it.
 */
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
                   struct urb *urb, gfp_t gfp)
{
        int result;
        struct device *dev = &wa->usb_iface->dev;
        struct wa_xfer *xfer;
        unsigned long my_flags;
        unsigned cant_sleep = irqs_disabled() | in_atomic();

        if ((urb->transfer_buffer == NULL)
            && (urb->sg == NULL)
            && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
            && urb->transfer_buffer_length != 0) {
                dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);
                dump_stack();
        }

        spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
        result = usb_hcd_link_urb_to_ep(&(wa->wusb->usb_hcd), urb);
        spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
        if (result < 0)
                goto error_link_urb;

        result = -ENOMEM;
        xfer = kzalloc(sizeof(*xfer), gfp);
        if (xfer == NULL)
                goto error_kmalloc;

        result = -ENOENT;
        if (urb->status != -EINPROGRESS)        /* cancelled */
                goto error_dequeued;            /* before starting? */
        wa_xfer_init(xfer);
        xfer->wa = wa_get(wa);
        xfer->urb = urb;
        xfer->gfp = gfp;
        xfer->ep = ep;
        urb->hcpriv = xfer;

        dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
                xfer, urb, urb->pipe, urb->transfer_buffer_length,
                urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
                urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
                cant_sleep ? "deferred" : "inline");

        if (cant_sleep) {
                usb_get_urb(urb);
                spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
                list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
                spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
                queue_work(wusbd, &wa->xfer_enqueue_work);
        } else {
                result = wa_urb_enqueue_b(xfer);
                if (result < 0) {
                        /*
                         * URB submit/enqueue failed.  Clean up, return an
                         * error and do not run the callback.  This avoids
                         * an infinite submit/complete loop.
                         */
                        dev_err(dev, "%s: URB enqueue failed: %d\n",
                           __func__, result);
                        wa_put(xfer->wa);
                        wa_xfer_put(xfer);
                        spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
                        usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
                        spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
                        return result;
                }
        }
        return 0;

error_dequeued:
        kfree(xfer);
error_kmalloc:
        spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
        usb_hcd_unlink_urb_from_ep(&(wa->wusb->usb_hcd), urb);
        spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
error_link_urb:
        return result;
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue);
1915 * Dequeue a URB and make sure uwb_hcd_giveback_urb() [completion
1916 * handler] is called.
1918 * Until a transfer goes successfully through wa_urb_enqueue() it
1919 * needs to be dequeued with completion calling; when stuck in delayed
1920 * or before wa_xfer_setup() is called, we need to do completion.
1922 * not setup If there is no hcpriv yet, that means that that enqueue
1923 * still had no time to set the xfer up. Because
1924 * urb->status should be other than -EINPROGRESS,
1925 * enqueue() will catch that and bail out.
1927 * If the transfer has gone through setup, we just need to clean it
1928 * up. If it has gone through submit(), we have to abort it [with an
1929 * asynch request] and then make sure we cancel each segment.
int wa_urb_dequeue(struct wahc *wa, struct urb *urb, int status)
{
	int result;
	unsigned long flags, flags2;
	struct wa_xfer *xfer;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned cnt, done = 0, xfer_abort_pending;
	unsigned rpipe_ready = 0;

	/* check if it is safe to unlink. */
	spin_lock_irqsave(&wa->xfer_list_lock, flags);
	result = usb_hcd_check_unlink_urb(&(wa->wusb->usb_hcd), urb, status);
	if ((result == 0) && urb->hcpriv) {
		/*
		 * Get a xfer ref to prevent a race with wa_xfer_giveback
		 * cleaning up the xfer while we are working with it.
		 */
		wa_xfer_get(urb->hcpriv);
	}
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags);
	if (result)
		return result;

	xfer = urb->hcpriv;
	if (xfer == NULL)
		return -ENOENT;
	spin_lock_irqsave(&xfer->lock, flags);
	pr_debug("%s: DEQUEUE xfer id 0x%08X\n", __func__, wa_xfer_id(xfer));
	rpipe = xfer->ep->hcpriv;
	if (rpipe == NULL) {
		pr_debug("%s: xfer %p id 0x%08X has no RPIPE.  %s",
			__func__, xfer, wa_xfer_id(xfer),
			"Probably already aborted.\n");
		result = -ENOENT;
		goto out_unlock;
	}
	/*
	 * Check for done to avoid racing with wa_xfer_giveback and completing
	 * twice.
	 */
	if (__wa_xfer_is_done(xfer)) {
		pr_debug("%s: xfer %p id 0x%08X already done.\n", __func__,
			xfer, wa_xfer_id(xfer));
		result = -ENOENT;
		goto out_unlock;
	}
	/* Check the delayed list -> if there, release and complete. */
	spin_lock_irqsave(&wa->xfer_list_lock, flags2);
	if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
		goto dequeue_delayed;
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	if (xfer->seg == NULL)		/* still hasn't reached */
		goto out_unlock;	/* setup(), enqueue_b() completes */
	/* Ok, the xfer is in flight already, it's been setup and submitted. */
	xfer_abort_pending = __wa_xfer_abort(xfer) >= 0;
	/*
	 * Grab the rpipe->seg_lock here to prevent racing with
	 * __wa_xfer_delayed_run.
	 */
	spin_lock(&rpipe->seg_lock);
	for (cnt = 0; cnt < xfer->segs; cnt++) {
		seg = xfer->seg[cnt];
		pr_debug("%s: xfer id 0x%08X#%d status = %d\n",
			__func__, wa_xfer_id(xfer), cnt, seg->status);
		switch (seg->status) {
		case WA_SEG_NOTREADY:
		case WA_SEG_READY:
			printk(KERN_ERR "xfer %p#%u: dequeue bad state %u\n",
				xfer, cnt, seg->status);
			WARN_ON(1);
			break;
		case WA_SEG_DELAYED:
			/*
			 * Delete from the rpipe delayed list.  If no segments
			 * on this xfer have been submitted, __wa_xfer_is_done
			 * will trigger a giveback below.  Otherwise, the
			 * submitted segments will be completed in the DTI
			 * interrupt.
			 */
			seg->status = WA_SEG_ABORTED;
			seg->result = -ENOENT;
			list_del(&seg->list_node);
			xfer->segs_done++;
			break;
		case WA_SEG_DONE:
		case WA_SEG_ERROR:
		case WA_SEG_ABORTED:
			break;
			/*
			 * The buf_in data for a segment in the
			 * WA_SEG_DTI_PENDING state is actively being read.
			 * Let wa_buf_in_cb handle it since it will be called
			 * and will increment xfer->segs_done.  Cleaning up
			 * here could cause wa_buf_in_cb to access the xfer
			 * after it has been completed/freed.
			 */
		case WA_SEG_DTI_PENDING:
			break;
			/*
			 * In the states below, the HWA device already knows
			 * about the transfer.  If an abort request was sent,
			 * allow the HWA to process it and wait for the
			 * results.  Otherwise, the DTI state and seg completed
			 * counts can get out of sync.
			 */
		case WA_SEG_SUBMITTED:
		case WA_SEG_PENDING:
			/*
			 * Check if the abort was successfully sent.  This
			 * could be false if the HWA has been removed but we
			 * haven't gotten the disconnect notification yet.
			 */
			if (!xfer_abort_pending) {
				seg->status = WA_SEG_ABORTED;
				rpipe_ready = rpipe_avail_inc(rpipe);
				xfer->segs_done++;
			}
			break;
		}
	}
	spin_unlock(&rpipe->seg_lock);
	xfer->result = urb->status;	/* -ENOENT or -ECONNRESET */
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	wa_xfer_put(xfer);
	return result;

out_unlock:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_put(xfer);
	return result;

dequeue_delayed:
	list_del_init(&xfer->list_node);
	spin_unlock_irqrestore(&wa->xfer_list_lock, flags2);
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
	wa_xfer_put(xfer);
	usb_put_urb(urb);	/* we got a ref in enqueue() */
	return 0;
}
EXPORT_SYMBOL_GPL(wa_urb_dequeue);
/*
 * Translation from WA status codes (WUSB1.0 Table 8.15) to errno
 * values.
 *
 * Positive errno values are internal inconsistencies and should be
 * flagged louder.  Negative values are to be passed up to the user in
 * the normal way.
 *
 * @status: USB WA status code -- high two bits are stripped.
 */
static int wa_xfer_status_to_errno(u8 status)
{
	int errno;
	u8 real_status = status;
	static int xlat[] = {
		[WA_XFER_STATUS_SUCCESS] =		0,
		[WA_XFER_STATUS_HALTED] =		-EPIPE,
		[WA_XFER_STATUS_DATA_BUFFER_ERROR] =	-ENOBUFS,
		[WA_XFER_STATUS_BABBLE] =		-EOVERFLOW,
		[WA_XFER_RESERVED] =			EINVAL,
		[WA_XFER_STATUS_NOT_FOUND] =		0,
		[WA_XFER_STATUS_INSUFFICIENT_RESOURCE] = -ENOMEM,
		[WA_XFER_STATUS_TRANSACTION_ERROR] =	-EILSEQ,
		[WA_XFER_STATUS_ABORTED] =		-ENOENT,
		[WA_XFER_STATUS_RPIPE_NOT_READY] =	EINVAL,
		[WA_XFER_INVALID_FORMAT] =		EINVAL,
		[WA_XFER_UNEXPECTED_SEGMENT_NUMBER] =	EINVAL,
		[WA_XFER_STATUS_RPIPE_TYPE_MISMATCH] =	EINVAL,
	};
	status &= 0x3f;

	if (status == 0)
		return 0;
	if (status >= ARRAY_SIZE(xlat)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Unknown WA transfer status 0x%02x\n",
			__func__, real_status);
		return -EINVAL;
	}
	errno = xlat[status];
	if (unlikely(errno > 0)) {
		printk_ratelimited(KERN_ERR "%s(): BUG? "
			"Inconsistent WA status: 0x%02x\n",
			__func__, real_status);
		errno = -errno;
	}
	return errno;
}
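
/*
 * Editor's illustrative sketch -- not part of the original driver.  A
 * worked example of the translation above: a bTransferStatus byte with the
 * error flag (bit 7) set and a code of WA_XFER_STATUS_HALTED strips down
 * to the plain HALTED code and comes back from xlat[] as -EPIPE.  Reserved
 * codes deliberately carry a positive EINVAL so the function can log the
 * inconsistency before negating it.
 */
static inline int wa_xfer_status_demo_sketch(void)
{
	/* 0x80 (error flag) | HALTED code -> masked to HALTED -> -EPIPE */
	return wa_xfer_status_to_errno(0x80 | WA_XFER_STATUS_HALTED);
}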
/*
 * If a last segment flag and/or a transfer result error is encountered,
 * no other segment transfer results will be returned from the device.
 * Mark the remaining submitted or pending xfers as completed so that
 * the xfer will complete cleanly.
 *
 * xfer->lock must be held.
 */
static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
		int starting_index, enum wa_seg_status status)
{
	int index;
	struct wa_rpipe *rpipe = xfer->ep->hcpriv;

	for (index = starting_index; index < xfer->segs_submitted; index++) {
		struct wa_seg *current_seg = xfer->seg[index];

		BUG_ON(current_seg == NULL);

		switch (current_seg->status) {
		case WA_SEG_SUBMITTED:
		case WA_SEG_PENDING:
		case WA_SEG_DTI_PENDING:
			rpipe_avail_inc(rpipe);
		/*
		 * Do not increment RPIPE avail for the WA_SEG_DELAYED case
		 * since it has not been submitted to the RPIPE.
		 */
		/* fall through */
		case WA_SEG_DELAYED:
			xfer->segs_done++;
			current_seg->status = status;
			break;
		case WA_SEG_ABORTED:
			break;
		default:
			WARN(1, "%s: xfer 0x%08X#%d. bad seg status = %d\n",
				__func__, wa_xfer_id(xfer), index,
				current_seg->status);
			break;
		}
	}
}
/* Populate the given urb based on the current isoc transfer state. */
static int __wa_populate_buf_in_urb_isoc(struct wahc *wa,
	struct urb *buf_in_urb, struct wa_xfer *xfer, struct wa_seg *seg)
{
	int urb_start_frame = seg->isoc_frame_index + seg->isoc_frame_offset;
	int seg_index, total_len = 0, urb_frame_index = urb_start_frame;
	struct usb_iso_packet_descriptor *iso_frame_desc =
		xfer->urb->iso_frame_desc;
	const int dti_packet_size = usb_endpoint_maxp(wa->dti_epd);
	int next_frame_contiguous;
	struct usb_iso_packet_descriptor *iso_frame;

	BUG_ON(buf_in_urb->status == -EINPROGRESS);

	/*
	 * If the current frame actual_length is contiguous with the next
	 * frame and actual_length is a multiple of the DTI endpoint max
	 * packet size, combine the current frame with the next frame in a
	 * single URB.  This reduces the number of URBs that must be
	 * submitted in that case.
	 */
	seg_index = seg->isoc_frame_index;
	do {
		next_frame_contiguous = 0;

		iso_frame = &iso_frame_desc[urb_frame_index];
		total_len += iso_frame->actual_length;
		++urb_frame_index;
		++seg_index;

		if (seg_index < seg->isoc_frame_count) {
			struct usb_iso_packet_descriptor *next_iso_frame;

			next_iso_frame = &iso_frame_desc[urb_frame_index];

			if ((iso_frame->offset + iso_frame->actual_length) ==
				next_iso_frame->offset)
				next_frame_contiguous = 1;
		}
	} while (next_frame_contiguous
			&& ((iso_frame->actual_length % dti_packet_size) == 0));

	/* this should always be 0 before a resubmit. */
	buf_in_urb->num_mapped_sgs = 0;
	buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
		iso_frame_desc[urb_start_frame].offset;
	buf_in_urb->transfer_buffer_length = total_len;
	buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	buf_in_urb->transfer_buffer = NULL;
	buf_in_urb->sg = NULL;
	buf_in_urb->num_sgs = 0;
	buf_in_urb->context = seg;

	/* return the number of frames included in this URB. */
	return seg_index - seg->isoc_frame_index;
}
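
/*
 * Editor's illustrative sketch -- not part of the original driver.  The
 * frame-combining rule used by the loop above, stated in isolation: two
 * consecutive isoc frames may share one buf-in URB only when the first
 * frame's data ends exactly where the next frame's begins, and the first
 * frame's length is a whole number of DTI max-packet units (a short packet
 * would otherwise terminate the combined bulk read early).
 */
static inline bool wa_frames_combinable_sketch(
	const struct usb_iso_packet_descriptor *cur,
	const struct usb_iso_packet_descriptor *next,
	int dti_packet_size)
{
	return ((cur->offset + cur->actual_length) == next->offset) &&
		((cur->actual_length % dti_packet_size) == 0);
}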
/* Populate the given urb based on the current transfer state. */
static int wa_populate_buf_in_urb(struct urb *buf_in_urb, struct wa_xfer *xfer,
	unsigned int seg_idx, unsigned int bytes_transferred)
{
	int result = 0;
	struct wa_seg *seg = xfer->seg[seg_idx];

	BUG_ON(buf_in_urb->status == -EINPROGRESS);
	/* this should always be 0 before a resubmit. */
	buf_in_urb->num_mapped_sgs = 0;

	if (xfer->is_dma) {
		buf_in_urb->transfer_dma = xfer->urb->transfer_dma
			+ (seg_idx * xfer->seg_size);
		buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		buf_in_urb->transfer_buffer = NULL;
		buf_in_urb->sg = NULL;
		buf_in_urb->num_sgs = 0;
	} else {
		/* do buffer or SG processing. */
		buf_in_urb->transfer_flags &= ~URB_NO_TRANSFER_DMA_MAP;

		if (xfer->urb->transfer_buffer) {
			buf_in_urb->transfer_buffer =
				xfer->urb->transfer_buffer
				+ (seg_idx * xfer->seg_size);
			buf_in_urb->sg = NULL;
			buf_in_urb->num_sgs = 0;
		} else {
			/*
			 * Allocate an SG list to store seg_size bytes
			 * and copy the subset of the xfer->urb->sg that
			 * matches the buffer subset we are about to read.
			 */
			buf_in_urb->sg = wa_xfer_create_subset_sg(
				xfer->urb->sg,
				seg_idx * xfer->seg_size,
				bytes_transferred,
				&(buf_in_urb->num_sgs));
			if (!(buf_in_urb->sg)) {
				buf_in_urb->num_sgs = 0;
				result = -ENOMEM;
			}
			buf_in_urb->transfer_buffer = NULL;
		}
	}
	buf_in_urb->transfer_buffer_length = bytes_transferred;
	buf_in_urb->context = seg;

	return result;
}
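
/*
 * Editor's illustrative sketch -- not part of the original driver.  The
 * addressing rule wa_populate_buf_in_urb() relies on: segment N of a
 * transfer always starts at byte offset N * seg_size into the original
 * URB's buffer, whether that buffer is named by a DMA handle or by a CPU
 * pointer.
 */
static inline dma_addr_t wa_seg_dma_addr_sketch(dma_addr_t transfer_dma,
	unsigned int seg_idx, size_t seg_size)
{
	return transfer_dma + ((dma_addr_t)seg_idx * seg_size);
}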
/*
 * Process a xfer result completion message
 *
 * inbound transfers: need to schedule a buf_in_urb read
 *
 * FIXME: this function needs to be broken up in parts
 */
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
		struct wa_xfer_result *xfer_result)
{
	int result;
	struct device *dev = &wa->usb_iface->dev;
	unsigned long flags;
	unsigned int seg_idx;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned done = 0;
	u8 usb_status;
	unsigned rpipe_ready = 0;
	unsigned bytes_transferred = le32_to_cpu(xfer_result->dwTransferLength);
	struct urb *buf_in_urb = &(wa->buf_in_urbs[0]);

	spin_lock_irqsave(&xfer->lock, flags);
	seg_idx = xfer_result->bTransferSegment & 0x7f;
	if (unlikely(seg_idx >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;
	usb_status = xfer_result->bTransferStatus;
	dev_dbg(dev, "xfer %p ID 0x%08X#%u: bTransferStatus 0x%02x (seg status %u)\n",
		xfer, wa_xfer_id(xfer), seg_idx, usb_status, seg->status);
	if (seg->status == WA_SEG_ABORTED
	    || seg->status == WA_SEG_ERROR)	/* already handled */
		goto segment_aborted;
	if (seg->status == WA_SEG_SUBMITTED)	/* ops, got here */
		seg->status = WA_SEG_PENDING;	/* before wa_seg{_dto}_cb() */
	if (seg->status != WA_SEG_PENDING) {
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);
		seg->status = WA_SEG_PENDING;	/* workaround/"fix" it */
	}
	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p 0x%08X:#%u failed (0x%02x)\n",
			xfer, xfer->id, seg->index, usb_status);
		seg->status = ((usb_status & 0x7F) == WA_XFER_STATUS_ABORTED) ?
			WA_SEG_ABORTED : WA_SEG_ERROR;
		goto error_complete;
	}
	/* FIXME: we ignore warnings, tally them for stats */
	if (usb_status & 0x40)		/* Warning?... */
		usb_status = 0;		/* ... pass */
	/*
	 * If the last segment bit is set, complete the remaining segments.
	 * When the current segment is completed, either in wa_buf_in_cb for
	 * transfers with data or below for no data, the xfer will complete.
	 */
	if (xfer_result->bTransferSegment & 0x80)
		wa_complete_remaining_xfer_segs(xfer, seg->index + 1,
			WA_SEG_DONE);
	if (usb_pipeisoc(xfer->urb->pipe)
		&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
		/* set up WA state to read the isoc packet status next. */
		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
		wa->dti_isoc_xfer_seg = seg_idx;
		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
	} else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
			&& (bytes_transferred > 0)) {
		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;
		result = wa_populate_buf_in_urb(buf_in_urb, xfer, seg_idx,
			bytes_transferred);
		if (result < 0)
			goto error_buf_in_populate;
		++(wa->active_buf_in_urbs);
		result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);
		if (result < 0) {
			--(wa->active_buf_in_urbs);
			goto error_submit_buf_in;
		}
	} else {
		/* OUT data phase or no data, complete it -- */
		seg->result = bytes_transferred;
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	return;

error_submit_buf_in:
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);
	seg->result = result;
	kfree(buf_in_urb->sg);
	buf_in_urb->sg = NULL;
error_buf_in_populate:
	__wa_xfer_abort(xfer);
	seg->status = WA_SEG_ERROR;
error_complete:
	xfer->segs_done++;
	rpipe_ready = rpipe_avail_inc(rpipe);
	wa_complete_remaining_xfer_segs(xfer, seg->index + 1, seg->status);
	done = __wa_xfer_is_done(xfer);
	/*
	 * Queue a work item to clear STALL for control endpoints.
	 * Otherwise, let endpoint_reset take care of it.
	 */
	if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
		usb_endpoint_xfer_control(&xfer->ep->desc) &&
		done) {

		dev_info(dev, "Control EP stall.  Queue delayed work.\n");
		spin_lock(&wa->xfer_list_lock);
		/* move xfer from xfer_list to xfer_errored_list. */
		list_move_tail(&xfer->list_node, &wa->xfer_errored_list);
		spin_unlock(&wa->xfer_list_lock);
		spin_unlock_irqrestore(&xfer->lock, flags);
		queue_work(wusbd, &wa->xfer_error_work);
	} else {
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}
	return;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_urb_dequeue(wa, xfer->urb, -ENOENT);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);
	if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
		dev_err(dev, "DTI: URB max acceptable errors "
			"exceeded, resetting device\n");
		wa_reset_all(wa);
	}
	return;

segment_aborted:
	/* nothing to do, as the aborter did the completion */
	spin_unlock_irqrestore(&xfer->lock, flags);
}
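
/*
 * Editor's illustrative sketch -- not part of the original driver.  The
 * bit layout wa_xfer_result_chew() consumes: bTransferSegment packs the
 * segment index into bits 0-6 with a "last segment" flag in bit 7, while
 * bTransferStatus carries the status code in bits 0-5, a warning flag in
 * bit 6 and an error flag in bit 7.
 */
struct wa_result_bits_sketch {
	unsigned int seg_index;		/* bTransferSegment & 0x7f */
	bool last_segment;		/* bTransferSegment & 0x80 */
	u8 status_code;			/* bTransferStatus & 0x3f */
	bool warning;			/* bTransferStatus & 0x40 */
	bool error;			/* bTransferStatus & 0x80 */
};

static inline struct wa_result_bits_sketch wa_decode_result_sketch(
	u8 bTransferSegment, u8 bTransferStatus)
{
	struct wa_result_bits_sketch bits = {
		.seg_index	= bTransferSegment & 0x7f,
		.last_segment	= !!(bTransferSegment & 0x80),
		.status_code	= bTransferStatus & 0x3f,
		.warning	= !!(bTransferStatus & 0x40),
		.error		= !!(bTransferStatus & 0x80),
	};
	return bits;
}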
/*
 * Process an isochronous packet status message
 *
 * inbound transfers: need to schedule a buf_in_urb read
 */
static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_xfer_packet_status_hwaiso *packet_status;
	struct wa_xfer_packet_status_len_hwaiso *status_array;
	struct wa_xfer *xfer;
	unsigned long flags;
	struct wa_seg *seg;
	struct wa_rpipe *rpipe;
	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
	unsigned first_frame_index = 0, rpipe_ready = 0;
	size_t expected_size;

	/* We have a xfer result buffer; check it */
	dev_dbg(dev, "DTI: isoc packet status %d bytes at %p\n",
		urb->actual_length, urb->transfer_buffer);
	packet_status = (struct wa_xfer_packet_status_hwaiso *)(wa->dti_buf);
	if (packet_status->bPacketType != WA_XFER_ISO_PACKET_STATUS) {
		dev_err(dev, "DTI Error: isoc packet status--bad type 0x%02x\n",
			packet_status->bPacketType);
		goto error_parse_buffer;
	}
	xfer = wa_xfer_get_by_id(wa, wa->dti_isoc_xfer_in_progress);
	if (xfer == NULL) {
		dev_err(dev, "DTI Error: isoc packet status--unknown xfer 0x%08x\n",
			wa->dti_isoc_xfer_in_progress);
		goto error_parse_buffer;
	}
	spin_lock_irqsave(&xfer->lock, flags);
	if (unlikely(wa->dti_isoc_xfer_seg >= xfer->segs))
		goto error_bad_seg;
	seg = xfer->seg[wa->dti_isoc_xfer_seg];
	rpipe = xfer->ep->hcpriv;
	expected_size = sizeof(*packet_status) +
			(sizeof(packet_status->PacketStatus[0]) *
			seg->isoc_frame_count);
	if (urb->actual_length != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad urb length (%d bytes vs %zu needed)\n",
			urb->actual_length, expected_size);
		goto error_bad_seg;
	}
	if (le16_to_cpu(packet_status->wLength) != expected_size) {
		dev_err(dev, "DTI Error: isoc packet status--bad length %u\n",
			le16_to_cpu(packet_status->wLength));
		goto error_bad_seg;
	}
	/* write isoc packet status and lengths back to the xfer urb. */
	status_array = packet_status->PacketStatus;
	xfer->urb->start_frame =
		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
		struct usb_iso_packet_descriptor *iso_frame_desc =
			xfer->urb->iso_frame_desc;
		const int xfer_frame_index =
			seg->isoc_frame_offset + seg_index;

		iso_frame_desc[xfer_frame_index].status =
			wa_xfer_status_to_errno(
			le16_to_cpu(status_array[seg_index].PacketStatus));
		iso_frame_desc[xfer_frame_index].actual_length =
			le16_to_cpu(status_array[seg_index].PacketLength);
		/* track the number of frames successfully transferred. */
		if (iso_frame_desc[xfer_frame_index].actual_length > 0) {
			/* save the starting frame index for buf_in_urb. */
			if (!data_frame_count)
				first_frame_index = seg_index;
			++data_frame_count;
		}
	}

	if (xfer->is_inbound && data_frame_count) {
		int result, total_frames_read = 0, urb_index = 0;
		struct urb *buf_in_urb;

		/* IN data phase: read to buffer */
		seg->status = WA_SEG_DTI_PENDING;

		/* start with the first frame with data. */
		seg->isoc_frame_index = first_frame_index;
		/* submit up to WA_MAX_BUF_IN_URBS read URBs. */
		do {
			int urb_frame_index, urb_frame_count;
			struct usb_iso_packet_descriptor *iso_frame_desc;

			buf_in_urb = &(wa->buf_in_urbs[urb_index]);
			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa,
				buf_in_urb, xfer, seg);
			/* advance frame index to start of next read URB. */
			seg->isoc_frame_index += urb_frame_count;
			total_frames_read += urb_frame_count;

			++(wa->active_buf_in_urbs);
			result = usb_submit_urb(buf_in_urb, GFP_ATOMIC);

			/* skip 0-byte frames. */
			urb_frame_index =
				seg->isoc_frame_offset + seg->isoc_frame_index;
			iso_frame_desc =
				&(xfer->urb->iso_frame_desc[urb_frame_index]);
			while ((seg->isoc_frame_index <
						seg->isoc_frame_count) &&
			       (iso_frame_desc->actual_length == 0)) {
				++(seg->isoc_frame_index);
				++iso_frame_desc;
			}
			++urb_index;

		} while ((result == 0) && (urb_index < WA_MAX_BUF_IN_URBS)
				&& (seg->isoc_frame_index <
					seg->isoc_frame_count));

		if (result < 0) {
			--(wa->active_buf_in_urbs);
			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
				result);
			wa_reset_all(wa);
		} else if (data_frame_count > total_frames_read)
			/* If we need to read more frames, set DTI busy. */
			dti_busy = 1;
	} else {
		/* OUT transfer or no more IN data, complete it -- */
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_mark_seg_as_done(xfer, seg, WA_SEG_DONE);
	}
	spin_unlock_irqrestore(&xfer->lock, flags);
	if (dti_busy)
		wa->dti_state = WA_DTI_BUF_IN_DATA_PENDING;
	else
		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;
	if (done)
		wa_xfer_completion(xfer);
	if (rpipe_ready)
		wa_xfer_delayed_run(rpipe);
	wa_xfer_put(xfer);
	return dti_busy;

error_bad_seg:
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_put(xfer);
error_parse_buffer:
	return dti_busy;
}
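
/*
 * Editor's illustrative sketch -- not part of the original driver.  The
 * size check above in one expression: an isoc packet status message is a
 * fixed header followed by exactly one PacketStatus/PacketLength entry per
 * frame in the segment, so both urb->actual_length and the header's own
 * wLength field must equal this value.
 */
static inline size_t wa_iso_status_size_sketch(
	const struct wa_xfer_packet_status_hwaiso *packet_status,
	unsigned int isoc_frame_count)
{
	return sizeof(*packet_status) +
		(sizeof(packet_status->PacketStatus[0]) * isoc_frame_count);
}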
/*
 * Callback for the IN data phase
 *
 * If successful, transition state; otherwise, take a note of the
 * error, mark this segment done and try completion.
 *
 * Note we don't access until we are sure that the transfer hasn't
 * been cancelled (ECONNRESET, ENOENT), which could mean that
 * seg->xfer could be already gone.
 */
static void wa_buf_in_cb(struct urb *urb)
{
	struct wa_seg *seg = urb->context;
	struct wa_xfer *xfer = seg->xfer;
	struct wahc *wa;
	struct device *dev;
	struct wa_rpipe *rpipe;
	unsigned rpipe_ready = 0, isoc_data_frame_count = 0;
	unsigned long flags;
	int resubmit_dti = 0, active_buf_in_urbs;
	u8 done = 0;

	/* free the sg if it was used. */
	kfree(urb->sg);
	urb->sg = NULL;

	spin_lock_irqsave(&xfer->lock, flags);
	wa = xfer->wa;
	dev = &wa->usb_iface->dev;
	--(wa->active_buf_in_urbs);
	active_buf_in_urbs = wa->active_buf_in_urbs;
	rpipe = xfer->ep->hcpriv;

	if (usb_pipeisoc(xfer->urb->pipe)) {
		struct usb_iso_packet_descriptor *iso_frame_desc =
			xfer->urb->iso_frame_desc;
		int seg_index;

		/*
		 * Find the next isoc frame with data and count how many
		 * frames with data remain.
		 */
		seg_index = seg->isoc_frame_index;
		while (seg_index < seg->isoc_frame_count) {
			const int urb_frame_index =
				seg->isoc_frame_offset + seg_index;

			if (iso_frame_desc[urb_frame_index].actual_length > 0) {
				/* save the index of the next frame with data */
				if (!isoc_data_frame_count)
					seg->isoc_frame_index = seg_index;
				++isoc_data_frame_count;
			}
			++seg_index;
		}
	}
	spin_unlock_irqrestore(&xfer->lock, flags);

	switch (urb->status) {
	case 0:
		spin_lock_irqsave(&xfer->lock, flags);

		seg->result += urb->actual_length;
		if (isoc_data_frame_count > 0) {
			int result, urb_frame_count;

			/* submit a read URB for the next frame with data. */
			urb_frame_count = __wa_populate_buf_in_urb_isoc(wa, urb,
				xfer, seg);
			/* advance index to start of next read URB. */
			seg->isoc_frame_index += urb_frame_count;
			++(wa->active_buf_in_urbs);
			result = usb_submit_urb(urb, GFP_ATOMIC);
			if (result < 0) {
				--(wa->active_buf_in_urbs);
				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
					result);
				wa_reset_all(wa);
			}
			/*
			 * If we are in this callback and
			 * isoc_data_frame_count > 0, it means that the dti_urb
			 * submission was delayed in wa_dti_cb.  Once we submit
			 * the last buf_in_urb, we can submit the delayed
			 * dti_urb.
			 */
			resubmit_dti = (isoc_data_frame_count ==
							urb_frame_count);
		} else if (active_buf_in_urbs == 0) {
			dev_dbg(dev,
				"xfer %p 0x%08X#%u: data in done (%zu bytes)\n",
				xfer, wa_xfer_id(xfer), seg->index,
				seg->result);
			rpipe_ready = rpipe_avail_inc(rpipe);
			done = __wa_xfer_mark_seg_as_done(xfer, seg,
					WA_SEG_DONE);
		}
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
		break;
	case -ECONNRESET:	/* URB unlinked; no need to do anything */
	case -ENOENT:		/* as it was done by the one who unlinked us */
		break;
	default:		/* Other errors ... */
		/*
		 * Error on data buf read.  Only resubmit DTI if it hasn't
		 * already been done by previously hitting this error or by a
		 * successful completion of the previous buf_in_urb.
		 */
		resubmit_dti = wa->dti_state != WA_DTI_TRANSFER_RESULT_PENDING;
		spin_lock_irqsave(&xfer->lock, flags);
		if (printk_ratelimit())
			dev_err(dev, "xfer %p 0x%08X#%u: data in error %d\n",
				xfer, wa_xfer_id(xfer), seg->index,
				urb->status);
		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTO: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
		}
		seg->result = urb->status;
		rpipe_ready = rpipe_avail_inc(rpipe);
		if (active_buf_in_urbs == 0)
			done = __wa_xfer_mark_seg_as_done(xfer, seg,
					WA_SEG_ERROR);
		else
			__wa_xfer_abort(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);
		if (done)
			wa_xfer_completion(xfer);
		if (rpipe_ready)
			wa_xfer_delayed_run(rpipe);
	}

	if (resubmit_dti) {
		int result;

		wa->dti_state = WA_DTI_TRANSFER_RESULT_PENDING;

		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
		if (result < 0) {
			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
				result);
			wa_reset_all(wa);
		}
	}
}
/*
 * Handle an incoming transfer result buffer
 *
 * Given a transfer result buffer, it completes the transfer (possibly
 * scheduling and buffer in read) and then resubmits the DTI URB for a
 * new transfer result read.
 *
 * The xfer_result DTI URB state machine
 *
 * States: OFF | RXR (Read-Xfer-Result) | RBI (Read-Buffer-In)
 *
 * We start in OFF mode, the first xfer_result notification [through
 * wa_handle_notif_xfer()] moves us to RXR by posting the DTI-URB to
 * read.
 *
 * We receive a buffer -- if it is not a xfer_result, we complain and
 * repost the DTI-URB.  If it is a xfer_result then do the xfer seg
 * request accounting.  If it is an IN segment, we move to RBI and post
 * a BUF-IN-URB to the right buffer.  The BUF-IN-URB callback will
 * repost the DTI-URB and move to RXR state.  If there was no IN
 * segment, it will repost the DTI-URB.
 *
 * We go back to OFF when we detect an ENOENT or ESHUTDOWN (or too many
 * errors) in the URBs.  (An illustrative mapping of these states to the
 * driver's wa->dti_state values follows wa_dti_cb() below.)
 */
static void wa_dti_cb(struct urb *urb)
{
	int result, dti_busy = 0;
	struct wahc *wa = urb->context;
	struct device *dev = &wa->usb_iface->dev;
	u32 xfer_id;
	u8 usb_status;

	BUG_ON(wa->dti_urb != urb);
	switch (wa->dti_urb->status) {
	case 0:
		if (wa->dti_state == WA_DTI_TRANSFER_RESULT_PENDING) {
			struct wa_xfer_result *xfer_result;
			struct wa_xfer *xfer;

			/* We have a xfer result buffer; check it */
			dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
				urb->actual_length, urb->transfer_buffer);
			if (urb->actual_length != sizeof(*xfer_result)) {
				dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
					urb->actual_length,
					sizeof(*xfer_result));
				break;
			}
			xfer_result = (struct wa_xfer_result *)(wa->dti_buf);
			if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
				dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
					xfer_result->hdr.bLength);
				break;
			}
			if (xfer_result->hdr.bNotifyType != WA_XFER_RESULT) {
				dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
					xfer_result->hdr.bNotifyType);
				break;
			}
			xfer_id = le32_to_cpu(xfer_result->dwTransferID);
			usb_status = xfer_result->bTransferStatus & 0x3f;
			if (usb_status == WA_XFER_STATUS_NOT_FOUND) {
				/* taken care of already */
				dev_dbg(dev, "%s: xfer 0x%08X#%u not found.\n",
					__func__, xfer_id,
					xfer_result->bTransferSegment & 0x7f);
				break;
			}
			xfer = wa_xfer_get_by_id(wa, xfer_id);
			if (xfer == NULL) {
				/* FIXME: transaction not found. */
				dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
					xfer_id, usb_status);
				break;
			}
			wa_xfer_result_chew(wa, xfer, xfer_result);
			wa_xfer_put(xfer);
		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
			dti_busy = wa_process_iso_packet_status(wa, urb);
		} else {
			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
				wa->dti_state);
		}
		break;
	case -ENOENT:		/* (we killed the URB)...so, no broadcast */
	case -ESHUTDOWN:	/* going away! */
		dev_dbg(dev, "DTI: going down! %d\n", urb->status);
		goto out;
	default:
		/* Unknown error: if it happens too often, reset the HC */
		if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS,
			    EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "DTI: URB max acceptable errors "
				"exceeded, resetting device\n");
			wa_reset_all(wa);
			goto out;
		}
		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);
		break;
	}

	/* Resubmit the DTI URB if we are not busy processing isoc in frames. */
	if (!dti_busy) {
		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
		if (result < 0) {
			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
				result);
			wa_reset_all(wa);
		}
	}
out:
	return;
}
/*
 * Initialize the DTI URB for reading transfer result notifications and also
 * the buffer-in URBs, for reading buffers.  Then we just submit the DTI URB.
 */
int wa_dti_start(struct wahc *wa)
{
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;
	struct device *dev = &wa->usb_iface->dev;
	int result = -ENOMEM, index;

	if (wa->dti_urb != NULL)	/* DTI URB already started */
		goto out;

	wa->dti_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (wa->dti_urb == NULL) {
		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;
	}
	usb_fill_bulk_urb(
		wa->dti_urb, wa->usb_dev,
		usb_rcvbulkpipe(wa->usb_dev, 0x80 | dti_epd->bEndpointAddress),
		wa->dti_buf, wa->dti_buf_size,
		wa_dti_cb, wa);

	/* init the buf in URBs */
	for (index = 0; index < WA_MAX_BUF_IN_URBS; ++index) {
		usb_fill_bulk_urb(
			&(wa->buf_in_urbs[index]), wa->usb_dev,
			usb_rcvbulkpipe(wa->usb_dev,
				0x80 | dti_epd->bEndpointAddress),
			NULL, 0, wa_buf_in_cb, wa);
	}
	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
	if (result < 0) {
		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
			result);
		goto error_dti_urb_submit;
	}
out:
	return 0;

error_dti_urb_submit:
	usb_put_urb(wa->dti_urb);
	wa->dti_urb = NULL;
error_dti_urb_alloc:
	return result;
}
EXPORT_SYMBOL_GPL(wa_dti_start);
/*
 * Transfer complete notification
 *
 * Called from the notif.c code.  We get a notification on EP2 saying
 * that some endpoint has some transfer result data available.  We are
 * about to read it.
 *
 * To speed things up, we always have a URB reading the DTI endpoint; we
 * don't really set it up and start it until the first xfer complete
 * notification arrives, which is what we do here.
 *
 * Follow up in wa_dti_cb(), as that's where the whole state
 * machine starts.
 *
 * @wa shall be referenced
 */
void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
{
	struct device *dev = &wa->usb_iface->dev;
	struct wa_notif_xfer *notif_xfer;
	const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd;

	notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr);
	BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER);

	if ((0x80 | notif_xfer->bEndpoint) != dti_epd->bEndpointAddress) {
		/* FIXME: hardcoded limitation, adapt */
		dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",
			notif_xfer->bEndpoint, dti_epd->bEndpointAddress);
		goto error;
	}

	/* attempt to start the DTI ep processing. */
	if (wa_dti_start(wa) < 0)
		goto error;

	return;

error:
	wa_reset_all(wa);
}