/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"
static u16 dwc2_frame_list_idx(u16 frame)
{
	return frame & (FRLISTEN_64_SIZE - 1);
}
static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
	return (idx + inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}
static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
	return (idx - inc) &
		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
		  MAX_DMA_DESC_NUM_GENERIC) - 1);
}
static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
		qh->dev_speed == USB_SPEED_HIGH) ?
		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}
static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
	return qh->dev_speed == USB_SPEED_HIGH ?
	       (qh->interval + 8 - 1) / 8 : qh->interval;
}
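/*
 * Illustrative note (not part of the original source): the helpers above
 * treat both the frame list and the descriptor list as power-of-two ring
 * buffers, so wrapping is a mask rather than a modulo. For example, assuming
 * FRLISTEN_64_SIZE is 64, dwc2_frame_list_idx(66) is 66 & 63 = 2, and
 * dwc2_desclist_idx_inc(63, 2, USB_SPEED_FULL) wraps to (63 + 2) & 63 = 1
 * when MAX_DMA_DESC_NUM_GENERIC is 64.
 */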
static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				gfp_t flags)
{
	qh->desc_list = dma_alloc_coherent(hsotg->dev,
					   sizeof(struct dwc2_hcd_dma_desc) *
					   dwc2_max_desc_num(qh), &qh->desc_list_dma,
					   flags);
	if (!qh->desc_list)
		return -ENOMEM;

	memset(qh->desc_list, 0,
	       sizeof(struct dwc2_hcd_dma_desc) * dwc2_max_desc_num(qh));

	qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
	if (!qh->n_bytes) {
		dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
				  * dwc2_max_desc_num(qh), qh->desc_list,
				  qh->desc_list_dma);
		qh->desc_list = NULL;
		return -ENOMEM;
	}

	return 0;
}
static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	if (qh->desc_list) {
		dma_free_coherent(hsotg->dev, sizeof(struct dwc2_hcd_dma_desc)
				  * dwc2_max_desc_num(qh), qh->desc_list,
				  qh->desc_list_dma);
		qh->desc_list = NULL;
	}

	kfree(qh->n_bytes);
	qh->n_bytes = NULL;
}
static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
	if (hsotg->frame_list)
		return 0;

	hsotg->frame_list = dma_alloc_coherent(hsotg->dev,
					       4 * FRLISTEN_64_SIZE,
					       &hsotg->frame_list_dma,
					       mem_flags);
	if (!hsotg->frame_list)
		return -ENOMEM;

	memset(hsotg->frame_list, 0, 4 * FRLISTEN_64_SIZE);
	return 0;
}
static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
	u32 *frame_list;
	dma_addr_t frame_list_dma;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	if (!hsotg->frame_list) {
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	frame_list = hsotg->frame_list;
	frame_list_dma = hsotg->frame_list_dma;
	hsotg->frame_list = NULL;

	spin_unlock_irqrestore(&hsotg->lock, flags);

	dma_free_coherent(hsotg->dev, 4 * FRLISTEN_64_SIZE, frame_list,
			  frame_list_dma);
}
static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg->regs + HCFG);
	if (hcfg & HCFG_PERSCHEDENA) {
		/* already enabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	dwc2_writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);

	hcfg &= ~HCFG_FRLISTEN_MASK;
	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
	dwc2_writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
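/*
 * Illustrative note (not part of the original source): periodic scheduling
 * is enabled by pointing HFLBADDR at the DMA address of the frame list and
 * then programming HCFG in one read-modify-write: the FRLISTEN field selects
 * the list size (64 entries here) and PERSCHEDENA turns the scheduler on.
 * The early return above makes the call idempotent when the schedule is
 * already enabled.
 */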
static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
	u32 hcfg;
	unsigned long flags;

	spin_lock_irqsave(&hsotg->lock, flags);

	hcfg = dwc2_readl(hsotg->regs + HCFG);
	if (!(hcfg & HCFG_PERSCHEDENA)) {
		/* already disabled */
		spin_unlock_irqrestore(&hsotg->lock, flags);
		return;
	}

	hcfg &= ~HCFG_PERSCHEDENA;
	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
	dwc2_writel(hcfg, hsotg->regs + HCFG);

	spin_unlock_irqrestore(&hsotg->lock, flags);
}
/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
				   int enable)
{
	struct dwc2_host_chan *chan;
	u16 i, j, inc;

	if (!hsotg) {
		pr_err("hsotg = %p\n", hsotg);
		return;
	}

	if (!qh->channel) {
		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
		return;
	}

	if (!hsotg->frame_list) {
		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
			hsotg->frame_list);
		return;
	}

	chan = qh->channel;
	inc = dwc2_frame_incr_val(qh);
	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
		i = dwc2_frame_list_idx(qh->sched_frame);
	else
		i = 0;

	j = i;
	do {
		if (enable)
			hsotg->frame_list[j] |= 1 << chan->hc_num;
		else
			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
	} while (j != i);

	if (!enable)
		return;

	chan->schinfo = 0;
	if (chan->speed == USB_SPEED_HIGH && qh->interval) {
		j = 1;
		/* TODO - check this */
		inc = (8 + qh->interval - 1) / qh->interval;
		for (i = 0; i < inc; i++) {
			chan->schinfo |= j;
			j = j << qh->interval;
		}
	} else {
		chan->schinfo = 0xff;
	}
}
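/*
 * Illustrative example (not part of the original source): each frame list
 * entry is a bitmask of host channels to service in that frame. For channel
 * number 3 with a servicing period of 8 frames starting at list index 2,
 * the loop above sets bit 3 (0x08) in entries 2, 10, 18, ... and clears the
 * same bit again when the channel is released (enable == 0).
 */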
static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh)
{
	struct dwc2_host_chan *chan = qh->channel;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->core_params->uframe_sched > 0)
			hsotg->available_host_channels++;
		else
			hsotg->non_periodic_channels--;
	} else {
		dwc2_update_frame_list(hsotg, qh, 0);
		hsotg->available_host_channels++;
	}

	/*
	 * The condition is added to prevent double cleanup try in case of
	 * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
	 */
	if (chan->qh) {
		if (!list_empty(&chan->hc_list_entry))
			list_del(&chan->hc_list_entry);
		dwc2_hc_cleanup(hsotg, chan);
		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
		chan->qh = NULL;
	}

	qh->channel = NULL;
	qh->ntd = 0;

	if (qh->desc_list)
		memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
		       dwc2_max_desc_num(qh));
}
/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			  gfp_t mem_flags)
{
	int retval;

	if (qh->do_split) {
		dev_err(hsotg->dev,
			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
		retval = -EINVAL;
		goto err0;
	}

	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
	if (retval)
		goto err0;

	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
		if (!hsotg->frame_list) {
			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
			if (retval)
				goto err1;
			/* Enable periodic schedule on first periodic QH */
			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
		}
	}

	qh->ntd = 0;
	return 0;

err1:
	dwc2_desc_list_free(hsotg, qh);
err0:
	return retval;
}
/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If QH is periodic and
 * the last, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	dwc2_desc_list_free(hsotg, qh);

	/*
	 * Channel still assigned due to some reasons.
	 * Seen on Isoc URB dequeue. Channel halted but no subsequent
	 * ChHalted interrupt to release the channel. Afterwards
	 * when it comes here from endpoint disable routine
	 * channel remains assigned.
	 */
	if (qh->channel)
		dwc2_release_channel_ddma(hsotg, qh);

	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
	    (hsotg->core_params->uframe_sched > 0 ||
	     !hsotg->periodic_channels) && hsotg->frame_list) {
		dwc2_per_sched_disable(hsotg);
		dwc2_frame_list_free(hsotg);
	}
}
static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
	if (qh->dev_speed == USB_SPEED_HIGH)
		/* Descriptor set (8 descriptors) index which is 8-aligned */
		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
	else
		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}
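/*
 * Illustrative example (not part of the original source): in high-speed
 * isochronous mode every full frame owns a set of 8 descriptors, one per
 * microframe, so the descriptor index is the frame index modulo the number
 * of sets, multiplied by 8. Assuming MAX_DMA_DESC_NUM_HS_ISOC is 256 (32
 * sets), frame index 35 maps to (35 & 31) * 8 = 24.
 */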
/*
 * Determine starting frame for Isochronous transfer.
 * Few frames skipped to prevent race condition with HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 *skip_frames)
{
	u16 frame;

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	/* sched_frame is always frame number (not uFrame) both in FS and HS! */

	/*
	 * skip_frames is used to limit activated descriptors number
	 * to avoid the situation when HC services the last activated
	 * descriptor firstly.
	 * Example for FS:
	 * Current frame is 1, scheduled frame is 3. Since HC always fetches
	 * the descriptor corresponding to curr_frame+1, the descriptor
	 * corresponding to frame 2 will be fetched. If the number of
	 * descriptors is max=64 (or greater) the list will be fully programmed
	 * with Active descriptors and it is possible case (rare) that the
	 * latest descriptor (considering rollback) corresponding to frame 2
	 * will be serviced first. The HS case is more probable because, in
	 * fact, up to 11 uframes (16 in the code) may be skipped.
	 */
	if (qh->dev_speed == USB_SPEED_HIGH) {
		/*
		 * Consider uframe counter also, to start xfer asap. If half of
		 * the frame elapsed skip 2 frames otherwise just 1 frame.
		 * Starting descriptor index must be 8-aligned, so if the
		 * current frame is near to complete the next one is skipped as
		 * well.
		 */
		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
			*skip_frames = 2 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		} else {
			*skip_frames = 1 * 8;
			frame = dwc2_frame_num_inc(hsotg->frame_number,
						   *skip_frames);
		}

		frame = dwc2_full_frame_num(frame);
	} else {
		/*
		 * Two frames are skipped for FS - the current and the next.
		 * But for descriptor programming, 1 frame (descriptor) is
		 * enough, see example above.
		 */
		*skip_frames = 1;
		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
	}

	return frame;
}
/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	u16 frame, fr_idx, fr_idx_tmp, skip_frames;

	/*
	 * With current ISOC processing algorithm the channel is being released
	 * when no more QTDs in the list (qh->ntd == 0). Thus this function is
	 * called only when qh->ntd == 0 and qh->channel == 0.
	 *
	 * So qh->channel != NULL branch is not used and just not removed from
	 * the source file. It is required for another possible approach which
	 * is, do not disable and release the channel when ISOC session
	 * completed, just move QH to inactive schedule until new QTD arrives.
	 * On new QTD, the QH moved back to 'ready' schedule, starting frame and
	 * therefore starting desc_index are recalculated. In this case channel
	 * is released only on ep_disable.
	 */

	/*
	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
	 * always 0.
	 */
	if (qh->channel) {
		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
		/*
		 * Calculate initial descriptor index based on FrameList current
		 * bitmap and servicing period
		 */
		fr_idx_tmp = dwc2_frame_list_idx(frame);
		fr_idx = (FRLISTEN_64_SIZE +
			  dwc2_frame_list_idx(qh->sched_frame) - fr_idx_tmp)
			 % dwc2_frame_incr_val(qh);
		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
	} else {
		qh->sched_frame = dwc2_calc_starting_frame(hsotg, qh,
							   &skip_frames);
		fr_idx = dwc2_frame_list_idx(qh->sched_frame);
	}

	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

	return skip_frames;
}
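/*
 * Worked example (not part of the original source) for the fr_idx math
 * above, assuming FRLISTEN_64_SIZE is 64: with the QH scheduled at list
 * index 2, a servicing period of 4 frames and the starting frame mapping to
 * index 7, fr_idx = (64 + 2 - 7) % 4 = 3 and then (3 + 7) % 64 = 10, i.e.
 * the first list index at or after the starting frame that still falls on
 * the QH's servicing pattern.
 */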
#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4
static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					 struct dwc2_qtd *qtd,
					 struct dwc2_qh *qh, u32 max_xfer_size,
					 u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;

	memset(dma_desc, 0, sizeof(*dma_desc));
	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

	if (frame_desc->length > max_xfer_size)
		qh->n_bytes[idx] = max_xfer_size;
	else
		qh->n_bytes[idx] = frame_desc->length;

	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
			   HOST_DMA_ISOC_NBYTES_MASK;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for each descriptor corresponding to last frame of URB */
	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
		dma_desc->status |= HOST_DMA_IOC;
#endif

	qh->ntd++;
	qtd->isoc_frame_index_last++;
}
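/*
 * Illustrative note (not part of the original source): for isochronous
 * descriptors the transfer size is packed into the NBYTES field of the
 * status word, e.g. a 188-byte frame yields
 * (188 << HOST_DMA_ISOC_NBYTES_SHIFT) & HOST_DMA_ISOC_NBYTES_MASK, and the
 * IOC bit is OR-ed in for the descriptor that finishes an URB so an
 * interrupt can be raised as soon as that URB is ready to be given back.
 */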
static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 skip_frames)
{
	struct dwc2_qtd *qtd;
	u32 max_xfer_size;
	u16 idx, inc, n_desc, ntd_max = 0;

	idx = qh->td_last;
	inc = qh->interval;
	n_desc = 0;

	if (qh->interval) {
		ntd_max = (dwc2_max_desc_num(qh) + qh->interval - 1) /
				qh->interval;
		if (skip_frames && !qh->channel)
			ntd_max -= skip_frames / qh->interval;
	}

	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
						qtd->urb->packet_count) {
			if (n_desc > 1)
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
						     max_xfer_size, idx);
			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
			n_desc++;
		}
		qtd->in_process = 1;
	}

	qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
	/* Set IOC for last descriptor if descriptor list is full */
	if (qh->ntd == ntd_max) {
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
		qh->desc_list[idx].status |= HOST_DMA_IOC;
	}
#else
	/*
	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
	 * processing, i.e. on IOC generation driver activates next descriptor
	 * but core continues to process descriptors following the one with IOC
	 * set.
	 */

	if (n_desc > DESCNUM_THRESHOLD)
		/*
		 * Move IOC "up". Required even if there is only one QTD
		 * in the list, because QTDs might continue to be queued,
		 * but during the activation it was only one queued.
		 * Actually more than one QTD might be in the list if this
		 * function called from XferCompletion - QTDs was queued during
		 * HW processing of the previous descriptor chunk.
		 */
		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
					    qh->dev_speed);
	else
		/*
		 * Set the IOC for the latest descriptor if either number of
		 * descriptors is not greater than threshold or no more new
		 * descriptors activated
		 */
		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

	qh->desc_list[idx].status |= HOST_DMA_IOC;
#endif

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
		if (n_desc > 1)
			qh->desc_list[0].status |= HOST_DMA_A;
	}
}
static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
				    int n_desc)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
	int len = chan->xfer_len;

	if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
		len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

	if (chan->ep_is_in) {
		int num_packets;

		if (len > 0 && chan->max_packet)
			num_packets = (len + chan->max_packet - 1)
					/ chan->max_packet;
		else
			/* Need 1 packet for transfer length of 0 */
			num_packets = 1;

		/* Always program an integral # of packets for IN transfers */
		len = num_packets * chan->max_packet;
	}

	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
	qh->n_bytes[n_desc] = len;

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
	    qtd->control_phase == DWC2_CONTROL_SETUP)
		dma_desc->status |= HOST_DMA_SUP;

	dma_desc->buf = (u32)chan->xfer_dma;

	/*
	 * Last (or only) descriptor of IN transfer with actual size less
	 * than MaxPacket
	 */
	if (len > chan->xfer_len) {
		chan->xfer_len = 0;
	} else {
		chan->xfer_dma += len;
		chan->xfer_len -= len;
	}
}
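/*
 * Illustrative example (not part of the original source): for IN transfers
 * the descriptor length is rounded up to a whole number of max-packet-size
 * packets, so with max_packet = 512 a remaining xfer_len of 700 bytes is
 * programmed as 2 * 512 = 1024 bytes; the "len > chan->xfer_len" branch then
 * clears xfer_len because this descriptor covers the final short packet.
 */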
static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh)
{
	struct dwc2_qtd *qtd;
	struct dwc2_host_chan *chan = qh->channel;
	int n_desc = 0;

	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
		 (unsigned long)chan->xfer_dma, chan->xfer_len);

	/*
	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
	 * if SG transfer consists of multiple URBs, this pointer is re-assigned
	 * to the buffer of the currently processed QTD. For non-SG request
	 * there is always one QTD active.
	 */
	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

		if (n_desc) {
			/* SG request - more than 1 QTD */
			chan->xfer_dma = qtd->urb->dma +
					 qtd->urb->actual_length;
			chan->xfer_len = qtd->urb->length -
					 qtd->urb->actual_length;
			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
				 (unsigned long)chan->xfer_dma, chan->xfer_len);
		}

		qtd->n_desc = 0;
		do {
			if (n_desc > 1) {
				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
				dev_vdbg(hsotg->dev,
					 "set A bit in desc %d (%p)\n",
					 n_desc - 1,
					 &qh->desc_list[n_desc - 1]);
			}
			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
			dev_vdbg(hsotg->dev,
				 "desc %d (%p) buf=%08x status=%08x\n",
				 n_desc, &qh->desc_list[n_desc],
				 qh->desc_list[n_desc].buf,
				 qh->desc_list[n_desc].status);
			qtd->n_desc++;
			n_desc++;
		} while (chan->xfer_len > 0 &&
			 n_desc != MAX_DMA_DESC_NUM_GENERIC);

		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
		qtd->in_process = 1;
		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
			break;
		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
			break;
	}

	if (n_desc) {
		qh->desc_list[n_desc - 1].status |=
				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
			 n_desc - 1, &qh->desc_list[n_desc - 1]);
		if (n_desc > 1) {
			qh->desc_list[0].status |= HOST_DMA_A;
			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
				 &qh->desc_list[0]);
		}
		chan->ntd = n_desc;
	}
}
/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt so remains assigned to the endpoint(QH) until session is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Channel is already assigned */
	struct dwc2_host_chan *chan = qh->channel;
	u16 skip_frames = 0;

	switch (chan->ep_type) {
	case USB_ENDPOINT_XFER_CONTROL:
	case USB_ENDPOINT_XFER_BULK:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_INT:
		dwc2_init_non_isoc_dma_desc(hsotg, qh);
		dwc2_update_frame_list(hsotg, qh, 1);
		dwc2_hc_start_transfer_ddma(hsotg, chan);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!qh->ntd)
			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

		if (!chan->xfer_started) {
			dwc2_update_frame_list(hsotg, qh, 1);

			/*
			 * Always set to max, instead of actual size. Otherwise
			 * ntd will be changed with channel being enabled. Not
			 * recommended.
			 */
			chan->ntd = dwc2_max_desc_num(qh);

			/* Enable channel only once for ISOC */
			dwc2_hc_start_transfer_ddma(hsotg, chan);
		}

		break;
	default:
		break;
	}
}
#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2

static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan,
					struct dwc2_qtd *qtd,
					struct dwc2_qh *qh, u16 idx)
{
	struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u16 remain = 0;
	int rc = 0;

	if (!qtd->urb)
		return -EINVAL;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
			 HOST_DMA_ISOC_NBYTES_SHIFT;

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		/*
		 * XactError, or unable to complete all the transactions
		 * in the scheduled micro-frame/frame, both indicated by
		 * HOST_DMA_STS_PKTERR
		 */
		qtd->urb->error_count++;
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = -EPROTO;
	} else {
		/* Success */
		frame_desc->actual_length = qh->n_bytes[idx] - remain;
		frame_desc->status = 0;
	}

	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers here. The
		 * individual frame_desc status are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

		/*
		 * This check is necessary because urb_dequeue can be called
		 * from urb complete callback (sound driver for example). All
		 * pending URBs are dequeued there, so no need for further
		 * processing.
		 */
		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
			return -1;
		rc = DWC2_CMPL_DONE;
	}

	qh->ntd--;

	/* Stop if IOC requested descriptor reached */
	if (dma_desc->status & HOST_DMA_IOC)
		rc = DWC2_CMPL_STOP;

	return rc;
}
static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					 struct dwc2_host_chan *chan,
					 enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_qtd *qtd, *qtd_tmp;
	struct dwc2_qh *qh;
	u16 idx;
	int rc;

	qh = chan->qh;
	idx = qh->td_first;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
		/*
		 * Channel is halted in these error cases, considered as serious
		 * issues.
		 * Complete all URBs marking all frames as failed, irrespective
		 * whether some of the descriptors (frames) succeeded or not.
		 * Pass error code to completion routine as well, to update
		 * urb->status, some of class drivers might use it to stop
		 * queueing transfer requests.
		 */
		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
			  -EIO : -EOVERFLOW;

		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
					 qtd_list_entry) {
			if (qtd->urb) {
				for (idx = 0; idx < qtd->urb->packet_count;
				     idx++) {
					frame_desc = &qtd->urb->iso_descs[idx];
					frame_desc->status = err;
				}

				dwc2_host_complete(hsotg, qtd, err);
			}

			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		}

		return;
	}

	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
		if (!qtd->in_process)
			break;
		do {
			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
							  idx);
			if (rc < 0)
				return;
			idx = dwc2_desclist_idx_inc(idx, qh->interval,
						    chan->speed);
			if (rc == DWC2_CMPL_STOP)
				goto stop_scan;
			if (rc == DWC2_CMPL_DONE)
				break;
		} while (idx != qh->td_first);
	}

stop_scan:
	qh->td_first = idx;
}
static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
					       struct dwc2_host_chan *chan,
					       struct dwc2_qtd *qtd,
					       struct dwc2_hcd_dma_desc *dma_desc,
					       enum dwc2_halt_status halt_status,
					       u32 n_bytes, int *xfer_done)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	u16 remain = 0;

	if (chan->ep_is_in)
		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
			 HOST_DMA_NBYTES_SHIFT;

	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
		dev_err(hsotg->dev, "EIO\n");
		urb->status = -EIO;
		return 1;
	}

	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
		switch (halt_status) {
		case DWC2_HC_XFER_STALL:
			dev_vdbg(hsotg->dev, "Stall\n");
			urb->status = -EPIPE;
			break;
		case DWC2_HC_XFER_BABBLE_ERR:
			dev_err(hsotg->dev, "Babble\n");
			urb->status = -EOVERFLOW;
			break;
		case DWC2_HC_XFER_XACT_ERR:
			dev_err(hsotg->dev, "XactErr\n");
			urb->status = -EPROTO;
			break;
		default:
			dev_err(hsotg->dev,
				"%s: Unhandled descriptor error status (%d)\n",
				__func__, halt_status);
			break;
		}
		return 1;
	}

	if (dma_desc->status & HOST_DMA_A) {
		dev_vdbg(hsotg->dev,
			 "Active descriptor encountered on channel %d\n",
			 chan->hc_num);
		return 0;
	}

	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		if (qtd->control_phase == DWC2_CONTROL_DATA) {
			urb->actual_length += n_bytes - remain;
			if (remain || urb->actual_length >= urb->length) {
				/*
				 * For Control Data stage do not set urb->status
				 * to 0, to prevent URB callback. Set it when
				 * Status phase is done. See below.
				 */
				*xfer_done = 1;
			}
		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
			urb->status = 0;
			*xfer_done = 1;
		}
		/* No handling for SETUP stage */
	} else {
		/* BULK and INTR */
		urb->actual_length += n_bytes - remain;
		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
			 urb->actual_length);
		if (remain || urb->actual_length >= urb->length) {
			urb->status = 0;
			*xfer_done = 1;
		}
	}

	return 0;
}
static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
				      struct dwc2_host_chan *chan,
				      int chnum, struct dwc2_qtd *qtd,
				      int desc_num,
				      enum dwc2_halt_status halt_status,
				      int *xfer_done)
{
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_hcd_urb *urb = qtd->urb;
	struct dwc2_hcd_dma_desc *dma_desc;
	u32 n_bytes;
	int failed;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!urb)
		return -EINVAL;

	dma_desc = &qh->desc_list[desc_num];
	n_bytes = qh->n_bytes[desc_num];
	dev_vdbg(hsotg->dev,
		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
		 qtd, urb, desc_num, dma_desc, n_bytes);
	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
						     halt_status, n_bytes,
						     xfer_done);
	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
		dwc2_host_complete(hsotg, qtd, urb->status);
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
			 failed, *xfer_done, urb->status);
		return failed;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			break;
		case DWC2_CONTROL_DATA:
			if (*xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else if (desc_num + 1 == qtd->n_desc) {
				/*
				 * Last descriptor for Control data stage which
				 * is not completed yet
				 */
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
					     struct dwc2_host_chan *chan,
					     int chnum,
					     enum dwc2_halt_status halt_status)
{
	struct list_head *qtd_item, *qtd_tmp;
	struct dwc2_qh *qh = chan->qh;
	struct dwc2_qtd *qtd = NULL;
	int xfer_done;
	int desc_num = 0;

	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
			qtd->in_process = 0;
		return;
	}

	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
		int i;

		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
		xfer_done = 0;

		for (i = 0; i < qtd->n_desc; i++) {
			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
						       desc_num, halt_status,
						       &xfer_done)) {
				qtd = NULL;
				break;
			}
			desc_num++;
		}
	}

	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * Resetting the data toggle for bulk and interrupt endpoints
		 * in case of stall. See handle_hc_stall_intr().
		 */
		if (halt_status == DWC2_HC_XFER_STALL)
			qh->data_toggle = DWC2_HC_PID_DATA0;
		else
			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
	}

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->hcint & HCINTMSK_NYET) {
			/*
			 * Got a NYET on the last transaction of the transfer.
			 * It means that the endpoint should be in the PING
			 * state at the beginning of the next transfer.
			 */
			qh->ping_state = 1;
		}
	}
}
/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of Isochronous endpoint the channel is not halted until the end of
 * the session, i.e. QTD list is empty.
 * If periodic channel released the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 enum dwc2_halt_status halt_status)
{
	struct dwc2_qh *qh = chan->qh;
	int continue_isoc_xfer = 0;
	enum dwc2_transaction_type tr_type;

	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

		/* Release the channel if halted or session completed */
		if (halt_status != DWC2_HC_XFER_COMPLETE ||
		    list_empty(&qh->qtd_list)) {
			/* Halt the channel if session completed */
			if (halt_status == DWC2_HC_XFER_COMPLETE)
				dwc2_hc_halt(hsotg, chan, halt_status);
			dwc2_release_channel_ddma(hsotg, qh);
			dwc2_hcd_qh_unlink(hsotg, qh);
		} else {
			/* Keep in assigned schedule to continue transfer */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);
			continue_isoc_xfer = 1;
		}
		/*
		 * Todo: Consider the case when period exceeds FrameList size.
		 * Frame Rollover interrupt should be used.
		 */
	} else {
		/*
		 * Scan descriptor list to complete the URB(s), then release
		 * the channel
		 */
		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
						 halt_status);
		dwc2_release_channel_ddma(hsotg, qh);
		dwc2_hcd_qh_unlink(hsotg, qh);

		if (!list_empty(&qh->qtd_list)) {
			/*
			 * Add back to inactive non-periodic schedule on normal
			 * completion
			 */
			dwc2_hcd_qh_add(hsotg, qh);
		}
	}

	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
		if (continue_isoc_xfer) {
			if (tr_type == DWC2_TRANSACTION_NONE)
				tr_type = DWC2_TRANSACTION_PERIODIC;
			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
				tr_type = DWC2_TRANSACTION_ALL;
		}
		dwc2_hcd_queue_transactions(hsotg, tr_type);
	}
}