/*
 * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the Descriptor DMA implementation for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

static u16 dwc2_frame_list_idx(u16 frame)
{
        return frame & (FRLISTEN_64_SIZE - 1);
}

static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
{
        return (idx + inc) &
                ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
                  MAX_DMA_DESC_NUM_GENERIC) - 1);
}

static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
{
        return (idx - inc) &
                ((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
                  MAX_DMA_DESC_NUM_GENERIC) - 1);
}
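
/*
 * For illustration: both list depths are powers of two, so the masks above
 * implement cheap modular wraparound. Assuming the generic list depth
 * (MAX_DMA_DESC_NUM_GENERIC) is 64:
 *
 *	dwc2_desclist_idx_inc(62, 4, USB_SPEED_FULL) == (62 + 4) & 63 == 2
 *	dwc2_desclist_idx_dec(2, 4, USB_SPEED_FULL) == (2 - 4) & 63 == 62
 *
 * The u16 subtraction wraps modulo 65536, a multiple of the list size,
 * before masking, so decrementing near zero is safe.
 */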

static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
{
        return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
                qh->dev_speed == USB_SPEED_HIGH) ?
               MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
}

static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
{
        return qh->dev_speed == USB_SPEED_HIGH ?
               (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
}
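
/*
 * As the divide-by-8 above suggests, qh->host_interval is kept in uframes
 * for a HS device, so the increment between FrameList entries is the
 * interval rounded up to whole frames, e.g. an interval of 12 uframes
 * gives (12 + 7) / 8 = 2 frames. For FS/LS the interval is already in
 * frames and is used as is.
 */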

static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                                gfp_t flags)
{
        struct kmem_cache *desc_cache;

        if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
            && qh->dev_speed == USB_SPEED_HIGH)
                desc_cache = hsotg->desc_hsisoc_cache;
        else
                desc_cache = hsotg->desc_gen_cache;

        qh->desc_list_sz = sizeof(struct dwc2_hcd_dma_desc) *
                                                dwc2_max_desc_num(qh);

        qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
        if (!qh->desc_list)
                return -ENOMEM;

        qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
                                           qh->desc_list_sz,
                                           DMA_TO_DEVICE);

        qh->n_bytes = kzalloc(sizeof(u32) * dwc2_max_desc_num(qh), flags);
        if (!qh->n_bytes) {
                dma_unmap_single(hsotg->dev, qh->desc_list_dma,
                                 qh->desc_list_sz,
                                 DMA_FROM_DEVICE);
                kmem_cache_free(desc_cache, qh->desc_list);
                qh->desc_list = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        struct kmem_cache *desc_cache;

        if (qh->ep_type == USB_ENDPOINT_XFER_ISOC
            && qh->dev_speed == USB_SPEED_HIGH)
                desc_cache = hsotg->desc_hsisoc_cache;
        else
                desc_cache = hsotg->desc_gen_cache;

        if (qh->desc_list) {
                dma_unmap_single(hsotg->dev, qh->desc_list_dma,
                                 qh->desc_list_sz, DMA_FROM_DEVICE);
                kmem_cache_free(desc_cache, qh->desc_list);
                qh->desc_list = NULL;
        }

        kfree(qh->n_bytes);
        qh->n_bytes = NULL;
}

static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
{
        if (hsotg->frame_list)
                return 0;

        hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
        hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
        if (!hsotg->frame_list)
                return -ENOMEM;

        hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
                                               hsotg->frame_list_sz,
                                               DMA_TO_DEVICE);

        return 0;
}

static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
{
        unsigned long flags;

        spin_lock_irqsave(&hsotg->lock, flags);

        if (!hsotg->frame_list) {
                spin_unlock_irqrestore(&hsotg->lock, flags);
                return;
        }

        dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
                         hsotg->frame_list_sz, DMA_FROM_DEVICE);

        kfree(hsotg->frame_list);
        hsotg->frame_list = NULL;

        spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
{
        u32 hcfg;
        unsigned long flags;

        spin_lock_irqsave(&hsotg->lock, flags);

        hcfg = dwc2_readl(hsotg->regs + HCFG);
        if (hcfg & HCFG_PERSCHEDENA) {
                /* already enabled */
                spin_unlock_irqrestore(&hsotg->lock, flags);
                return;
        }

        dwc2_writel(hsotg->frame_list_dma, hsotg->regs + HFLBADDR);

        hcfg &= ~HCFG_FRLISTEN_MASK;
        hcfg |= fr_list_en | HCFG_PERSCHEDENA;
        dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
        dwc2_writel(hcfg, hsotg->regs + HCFG);

        spin_unlock_irqrestore(&hsotg->lock, flags);
}

static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
{
        u32 hcfg;
        unsigned long flags;

        spin_lock_irqsave(&hsotg->lock, flags);

        hcfg = dwc2_readl(hsotg->regs + HCFG);
        if (!(hcfg & HCFG_PERSCHEDENA)) {
                /* already disabled */
                spin_unlock_irqrestore(&hsotg->lock, flags);
                return;
        }

        hcfg &= ~HCFG_PERSCHEDENA;
        dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
        dwc2_writel(hcfg, hsotg->regs + HCFG);

        spin_unlock_irqrestore(&hsotg->lock, flags);
}

/*
 * Activates/Deactivates FrameList entries for the channel based on endpoint
 * servicing period
 */
static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                                   int enable)
{
        struct dwc2_host_chan *chan;
        u16 i, j, inc;

        if (!hsotg) {
                pr_err("hsotg = %p\n", hsotg);
                return;
        }

        if (!qh->channel) {
                dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
                return;
        }

        if (!hsotg->frame_list) {
                dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
                        hsotg->frame_list);
                return;
        }

        chan = qh->channel;
        inc = dwc2_frame_incr_val(qh);
        if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
                i = dwc2_frame_list_idx(qh->next_active_frame);
        else
                i = 0;

        j = i;
        do {
                if (enable)
                        hsotg->frame_list[j] |= 1 << chan->hc_num;
                else
                        hsotg->frame_list[j] &= ~(1 << chan->hc_num);
                j = (j + inc) & (FRLISTEN_64_SIZE - 1);
        } while (j != i);
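
        /*
         * For illustration: an ISOC QH whose next_active_frame maps to
         * entry 2 with inc = 8 touches entries 2, 10, 18, 26, 34, 42, 50
         * and 58, then wraps back to 2 and stops, so the channel bit ends
         * up set (or cleared) in every 8th of the 64 FrameList entries.
         */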

        /*
         * Sync frame list since controller will access it if periodic
         * channel is currently enabled.
         */
        dma_sync_single_for_device(hsotg->dev,
                                   hsotg->frame_list_dma,
                                   hsotg->frame_list_sz,
                                   DMA_TO_DEVICE);

        if (!enable)
                return;

        chan->schinfo = 0;
        if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
                j = 1;
                /* TODO - check this */
                inc = (8 + qh->host_interval - 1) / qh->host_interval;
                for (i = 0; i < inc; i++) {
                        chan->schinfo |= j;
                        j = j << qh->host_interval;
                }
        } else {
                chan->schinfo = 0xff;
        }
}
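
/*
 * A worked example for the schinfo mask above (the per-uframe schedule for
 * HS periodic transfers), assuming host_interval is in uframes here as the
 * TODO suggests is intended: with host_interval = 2, inc = (8 + 1) / 2 = 4,
 * so the loop sets bits 0, 2, 4 and 6 and schinfo ends up 0x55, i.e. every
 * other uframe of the frame. With host_interval = 4 the result is 0x11
 * (uframes 0 and 4).
 */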

static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
                                      struct dwc2_qh *qh)
{
        struct dwc2_host_chan *chan = qh->channel;

        if (dwc2_qh_is_non_per(qh)) {
                if (hsotg->core_params->uframe_sched > 0)
                        hsotg->available_host_channels++;
                else
                        hsotg->non_periodic_channels--;
        } else {
                dwc2_update_frame_list(hsotg, qh, 0);
                hsotg->available_host_channels++;
        }

        /*
         * The condition is added to prevent double cleanup try in case of
         * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
         */
        if (chan->qh) {
                if (!list_empty(&chan->hc_list_entry))
                        list_del(&chan->hc_list_entry);
                dwc2_hc_cleanup(hsotg, chan);
                list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
                chan->qh = NULL;
        }

        qh->channel = NULL;
        qh->ntd = 0;

        if (qh->desc_list)
                memset(qh->desc_list, 0, sizeof(struct dwc2_hcd_dma_desc) *
                       dwc2_max_desc_num(qh));
}

/**
 * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
 * related members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Allocates memory for the descriptor list. For the first periodic QH,
 * allocates memory for the FrameList and enables periodic scheduling.
 */
int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                          gfp_t mem_flags)
{
        int retval;

        if (qh->do_split) {
                dev_err(hsotg->dev,
                        "SPLIT Transfers are not supported in Descriptor DMA mode.\n");
                retval = -EINVAL;
                goto err0;
        }

        retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
        if (retval)
                goto err0;

        if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
            qh->ep_type == USB_ENDPOINT_XFER_INT) {
                if (!hsotg->frame_list) {
                        retval = dwc2_frame_list_alloc(hsotg, mem_flags);
                        if (retval)
                                goto err1;
                        /* Enable periodic schedule on first periodic QH */
                        dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
                }
        }

        qh->ntd = 0;
        return 0;

err1:
        dwc2_desc_list_free(hsotg, qh);
err0:
        return retval;
}

/**
 * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
 * members
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to free
 *
 * Frees descriptor list memory associated with the QH. If QH is periodic and
 * the last, frees FrameList memory and disables periodic scheduling.
 */
void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        unsigned long flags;

        dwc2_desc_list_free(hsotg, qh);

        /*
         * The channel may still be assigned in some cases. Seen on Isoc URB
         * dequeue: the channel is halted but no subsequent ChHalted
         * interrupt arrives to release it, so when we get here from the
         * endpoint disable routine the channel remains assigned.
         */
        spin_lock_irqsave(&hsotg->lock, flags);
        if (qh->channel)
                dwc2_release_channel_ddma(hsotg, qh);
        spin_unlock_irqrestore(&hsotg->lock, flags);

        if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
             qh->ep_type == USB_ENDPOINT_XFER_INT) &&
            (hsotg->core_params->uframe_sched > 0 ||
             !hsotg->periodic_channels) && hsotg->frame_list) {
                dwc2_per_sched_disable(hsotg);
                dwc2_frame_list_free(hsotg);
        }
}

static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
{
        if (qh->dev_speed == USB_SPEED_HIGH)
                /* Descriptor set (8 descriptors) index which is 8-aligned */
                return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
        else
                return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
}
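
/*
 * For illustration, assuming MAX_DMA_DESC_NUM_HS_ISOC is 256: for a HS
 * device, frame_idx 5 selects descriptor set (5 & 31) * 8 = 40, i.e. the
 * first of the eight per-uframe descriptors for that frame. For FS,
 * frame_idx 70 simply wraps to descriptor 70 & 63 = 6.
 */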

/*
 * Determine starting frame for Isochronous transfer.
 * Few frames skipped to prevent race condition with HC.
 */
static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
                                    struct dwc2_qh *qh, u16 *skip_frames)
{
        u16 frame;

        hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

        /*
         * next_active_frame is always frame number (not uFrame) both in FS
         * and HS!
         */

        /*
         * skip_frames is used to limit the number of activated descriptors
         * to avoid the situation when the HC services the last activated
         * descriptor first.
         * Example for FS:
         * Current frame is 1, scheduled frame is 3. Since HC always fetches
         * the descriptor corresponding to curr_frame+1, the descriptor
         * corresponding to frame 2 will be fetched. If the number of
         * descriptors is max=64 (or greater) the list will be fully programmed
         * with Active descriptors and it is possible (rarely) that the
         * latest descriptor (considering rollback) corresponding to frame 2
         * will be serviced first. The HS case is more probable because, in
         * fact, up to 11 uframes (16 in the code) may be skipped.
         */
        if (qh->dev_speed == USB_SPEED_HIGH) {
                /*
                 * Consider uframe counter also, to start xfer asap. If half of
                 * the frame elapsed skip 2 frames otherwise just 1 frame.
                 * Starting descriptor index must be 8-aligned, so if the
                 * current frame is near to complete the next one is skipped as
                 * well.
                 */
                if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
                        *skip_frames = 2 * 8;
                        frame = dwc2_frame_num_inc(hsotg->frame_number,
                                                   *skip_frames);
                } else {
                        *skip_frames = 1 * 8;
                        frame = dwc2_frame_num_inc(hsotg->frame_number,
                                                   *skip_frames);
                }

                frame = dwc2_full_frame_num(frame);
        } else {
                /*
                 * Two frames are skipped for FS - the current and the next.
                 * But for descriptor programming, 1 frame (descriptor) is
                 * enough, see example above.
                 */
                *skip_frames = 1;
                frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
        }

        return frame;
}

/*
 * Calculate initial descriptor index for isochronous transfer based on
 * scheduled frame
 */
static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
                                        struct dwc2_qh *qh)
{
        u16 frame, fr_idx, fr_idx_tmp, skip_frames;

        /*
         * With current ISOC processing algorithm the channel is being released
         * when no more QTDs in the list (qh->ntd == 0). Thus this function is
         * called only when qh->ntd == 0 and qh->channel == 0.
         *
         * So qh->channel != NULL branch is not used and just not removed from
         * the source file. It is required for another possible approach which
         * is, do not disable and release the channel when ISOC session
         * completed, just move QH to inactive schedule until new QTD arrives.
         * On new QTD, the QH moved back to 'ready' schedule, starting frame and
         * therefore starting desc_index are recalculated. In this case channel
         * is released only on ep_disable.
         */

        /*
         * Calculate starting descriptor index. For INTERRUPT endpoint it is
         * always 0.
         */
        if (qh->channel) {
                frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
                /*
                 * Calculate initial descriptor index based on FrameList current
                 * bitmap and servicing period
                 */
                fr_idx_tmp = dwc2_frame_list_idx(frame);
                fr_idx = (FRLISTEN_64_SIZE +
                          dwc2_frame_list_idx(qh->next_active_frame) -
                          fr_idx_tmp) % dwc2_frame_incr_val(qh);
                fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
        } else {
                qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
                                                                 &skip_frames);
                fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
        }

        qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);

        return skip_frames;
}
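
/*
 * A worked example of the fr_idx arithmetic above: with a servicing period
 * of 8 entries anchored at next_active_frame index 10 (pattern 2, 10, 18,
 * ..., 58) and a newly calculated frame landing on index 60,
 * fr_idx = (64 + 10 - 60) % 8 = 6, then (6 + 60) % 64 = 2, the first slot
 * of the QH's pattern reached after index 60, modulo the wrap.
 */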

#define ISOC_URB_GIVEBACK_ASAP

#define MAX_ISOC_XFER_SIZE_FS	1023
#define MAX_ISOC_XFER_SIZE_HS	3072
#define DESCNUM_THRESHOLD	4
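
/*
 * The transfer size limits match the USB 2.0 maximums: 1023 bytes per frame
 * for a FS isochronous endpoint, and 3 x 1024 = 3072 bytes per uframe for a
 * HS high-bandwidth endpoint. DESCNUM_THRESHOLD is a driver heuristic used
 * below when choosing where to set the IOC bit.
 */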

static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                         struct dwc2_qtd *qtd,
                                         struct dwc2_qh *qh, u32 max_xfer_size,
                                         u16 idx)
{
        struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[idx];
        struct dwc2_hcd_iso_packet_desc *frame_desc;

        memset(dma_desc, 0, sizeof(*dma_desc));
        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];

        if (frame_desc->length > max_xfer_size)
                qh->n_bytes[idx] = max_xfer_size;
        else
                qh->n_bytes[idx] = frame_desc->length;

        dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
        dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
                           HOST_DMA_ISOC_NBYTES_MASK;

        /* Set active bit */
        dma_desc->status |= HOST_DMA_A;

        qh->ntd++;
        qtd->isoc_frame_index_last++;

#ifdef ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for each descriptor corresponding to last frame of URB */
        if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
                dma_desc->status |= HOST_DMA_IOC;
#endif

        dma_sync_single_for_device(hsotg->dev,
                                   qh->desc_list_dma +
                                   (idx * sizeof(struct dwc2_hcd_dma_desc)),
                                   sizeof(struct dwc2_hcd_dma_desc),
                                   DMA_TO_DEVICE);
}

static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                    struct dwc2_qh *qh, u16 skip_frames)
{
        struct dwc2_qtd *qtd;
        u32 max_xfer_size;
        u16 idx, inc, n_desc = 0, ntd_max = 0;
        u16 cur_idx;
        u16 next_idx;

        idx = qh->td_last;
        inc = qh->host_interval;
        hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
        cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
        next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);

        /*
         * Ensure current frame number didn't overstep last scheduled
         * descriptor. If it happens, the only way to recover is to move
         * qh->td_last to current frame number + 1, so that the next isoc
         * descriptor will be scheduled on frame number + 1 and not on a
         * past frame.
         */
        if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
                if (inc < 32) {
                        dev_vdbg(hsotg->dev,
                                 "current frame number overstep last descriptor\n");
                        qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
                                                            qh->dev_speed);
                        idx = qh->td_last;
                }
        }

        if (qh->host_interval) {
                ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
                                qh->host_interval;
                if (skip_frames && !qh->channel)
                        ntd_max -= skip_frames / qh->host_interval;
        }
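
        /*
         * For illustration, assuming a 256-entry HS ISOC descriptor list:
         * with host_interval = 4 uframes, ntd_max = (256 + 3) / 4 = 64
         * descriptors may be active at once; if skip_frames = 16 (two full
         * frames) on a fresh channel, 16 / 4 = 4 of them are given up,
         * leaving 60.
         */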

        max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
                        MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;

        list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
                if (qtd->in_process &&
                    qtd->isoc_frame_index_last ==
                    qtd->urb->packet_count)
                        continue;

                qtd->isoc_td_first = idx;
                while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
                                qtd->urb->packet_count) {
                        dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
                                                     max_xfer_size, idx);
                        idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
                        n_desc++;
                }
                qtd->isoc_td_last = idx;
                qtd->in_process = 1;
        }

        qh->td_last = idx;

#ifdef ISOC_URB_GIVEBACK_ASAP
        /* Set IOC for last descriptor if descriptor list is full */
        if (qh->ntd == ntd_max) {
                idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
                qh->desc_list[idx].status |= HOST_DMA_IOC;
                dma_sync_single_for_device(hsotg->dev,
                                           qh->desc_list_dma + (idx *
                                           sizeof(struct dwc2_hcd_dma_desc)),
                                           sizeof(struct dwc2_hcd_dma_desc),
                                           DMA_TO_DEVICE);
        }
#else
        /*
         * Set IOC bit only for one descriptor. Always try to be ahead of HW
         * processing, i.e. on IOC generation the driver activates the next
         * descriptor, but the core continues to process descriptors following
         * the one with IOC set.
         */

        if (n_desc > DESCNUM_THRESHOLD)
                /*
                 * Move IOC "up". Required even if there is only one QTD
                 * in the list, because QTDs might continue to be queued,
                 * but during the activation only one was queued. Actually
                 * more than one QTD might be in the list if this function
                 * is called from XferCompletion - QTDs were queued during
                 * HW processing of the previous descriptor chunk.
                 */
                idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
                                            qh->dev_speed);
        else
                /*
                 * Set the IOC for the latest descriptor if either the number
                 * of descriptors is not greater than the threshold or no new
                 * descriptors were activated.
                 */
                idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);

        qh->desc_list[idx].status |= HOST_DMA_IOC;
        dma_sync_single_for_device(hsotg->dev,
                                   qh->desc_list_dma +
                                   (idx * sizeof(struct dwc2_hcd_dma_desc)),
                                   sizeof(struct dwc2_hcd_dma_desc),
                                   DMA_TO_DEVICE);
#endif
}

static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
                                    struct dwc2_host_chan *chan,
                                    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
                                    int n_desc)
{
        struct dwc2_hcd_dma_desc *dma_desc = &qh->desc_list[n_desc];
        int len = chan->xfer_len;

        if (len > MAX_DMA_DESC_SIZE - (chan->max_packet - 1))
                len = MAX_DMA_DESC_SIZE - (chan->max_packet - 1);

        if (chan->ep_is_in) {
                int num_packets;

                if (len > 0 && chan->max_packet)
                        num_packets = (len + chan->max_packet - 1)
                                        / chan->max_packet;
                else
                        /* Need 1 packet for transfer length of 0 */
                        num_packets = 1;

                /* Always program an integral # of packets for IN transfers */
                len = num_packets * chan->max_packet;
        }
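
        /*
         * Example of the rounding above: an IN request of 100 bytes with
         * max_packet = 64 is programmed as num_packets = (100 + 63) / 64 = 2,
         * i.e. len = 128, since the core can only receive whole packets;
         * a zero-length IN still needs one packet, so len = 64.
         */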

        dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
        qh->n_bytes[n_desc] = len;

        if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
            qtd->control_phase == DWC2_CONTROL_SETUP)
                dma_desc->status |= HOST_DMA_SUP;

        dma_desc->buf = (u32)chan->xfer_dma;

        dma_sync_single_for_device(hsotg->dev,
                                   qh->desc_list_dma +
                                   (n_desc * sizeof(struct dwc2_hcd_dma_desc)),
                                   sizeof(struct dwc2_hcd_dma_desc),
                                   DMA_TO_DEVICE);

        /*
         * Last (or only) descriptor of IN transfer with actual size less
         * than MaxPacket
         */
        if (len > chan->xfer_len) {
                chan->xfer_len = 0;
        } else {
                chan->xfer_dma += len;
                chan->xfer_len -= len;
        }
}
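
/*
 * A worked example, under the assumption that MAX_DMA_DESC_SIZE is the
 * 131071-byte maximum of the NBYTES field: with max_packet = 512 each
 * descriptor carries at most 131071 - 511 = 130560 bytes (255 whole
 * packets). A 200000-byte IN transfer thus takes one full descriptor
 * (xfer_len drops to 69440) and a second one programmed for 136 packets
 * (69632 bytes); since 69632 > 69440, the leftover logic above zeroes
 * xfer_len and the chain ends with that descriptor.
 */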

static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                        struct dwc2_qh *qh)
{
        struct dwc2_qtd *qtd;
        struct dwc2_host_chan *chan = qh->channel;
        int n_desc = 0;

        dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
                 (unsigned long)chan->xfer_dma, chan->xfer_len);

        /*
         * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
         * if SG transfer consists of multiple URBs, this pointer is re-assigned
         * to the buffer of the currently processed QTD. For non-SG request
         * there is always one QTD active.
         */
        list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
                dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);

                if (n_desc) {
                        /* SG request - more than 1 QTD */
                        chan->xfer_dma = qtd->urb->dma +
                                        qtd->urb->actual_length;
                        chan->xfer_len = qtd->urb->length -
                                        qtd->urb->actual_length;
                        dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
                                 (unsigned long)chan->xfer_dma, chan->xfer_len);
                }

                qtd->n_desc = 0;
                do {
                        if (n_desc > 1) {
                                qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
                                dev_vdbg(hsotg->dev,
                                         "set A bit in desc %d (%p)\n",
                                         n_desc - 1,
                                         &qh->desc_list[n_desc - 1]);
                                dma_sync_single_for_device(hsotg->dev,
                                        qh->desc_list_dma +
                                        ((n_desc - 1) *
                                        sizeof(struct dwc2_hcd_dma_desc)),
                                        sizeof(struct dwc2_hcd_dma_desc),
                                        DMA_TO_DEVICE);
                        }
                        dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
                        dev_vdbg(hsotg->dev,
                                 "desc %d (%p) buf=%08x status=%08x\n",
                                 n_desc, &qh->desc_list[n_desc],
                                 qh->desc_list[n_desc].buf,
                                 qh->desc_list[n_desc].status);
                        qtd->n_desc++;
                        n_desc++;
                } while (chan->xfer_len > 0 &&
                         n_desc != MAX_DMA_DESC_NUM_GENERIC);

                dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
                qtd->in_process = 1;
                if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
                        break;
                if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
                        break;
        }

        if (n_desc) {
                qh->desc_list[n_desc - 1].status |=
                        HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
                dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
                         n_desc - 1, &qh->desc_list[n_desc - 1]);
                dma_sync_single_for_device(hsotg->dev,
                                           qh->desc_list_dma + (n_desc - 1) *
                                           sizeof(struct dwc2_hcd_dma_desc),
                                           sizeof(struct dwc2_hcd_dma_desc),
                                           DMA_TO_DEVICE);
                if (n_desc > 1) {
                        qh->desc_list[0].status |= HOST_DMA_A;
                        dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
                                 &qh->desc_list[0]);
                        dma_sync_single_for_device(hsotg->dev,
                                        qh->desc_list_dma,
                                        sizeof(struct dwc2_hcd_dma_desc),
                                        DMA_TO_DEVICE);
                }
                chan->ntd = n_desc;
        }
}

/**
 * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 *
 * For Control and Bulk endpoints, initializes descriptor list and starts the
 * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
 * list then updates FrameList, marking appropriate entries as active.
 *
 * For Isochronous endpoints the starting descriptor index is calculated based
 * on the scheduled frame, but only on the first transfer descriptor within a
 * session. Then the transfer is started via enabling the channel.
 *
 * For Isochronous endpoints the channel is not halted on XferComplete
 * interrupt so it remains assigned to the endpoint (QH) until the session
 * is done.
 */
void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        /* Channel is already assigned */
        struct dwc2_host_chan *chan = qh->channel;
        u16 skip_frames = 0;

        switch (chan->ep_type) {
        case USB_ENDPOINT_XFER_CONTROL:
        case USB_ENDPOINT_XFER_BULK:
                dwc2_init_non_isoc_dma_desc(hsotg, qh);
                dwc2_hc_start_transfer_ddma(hsotg, chan);
                break;
        case USB_ENDPOINT_XFER_INT:
                dwc2_init_non_isoc_dma_desc(hsotg, qh);
                dwc2_update_frame_list(hsotg, qh, 1);
                dwc2_hc_start_transfer_ddma(hsotg, chan);
                break;
        case USB_ENDPOINT_XFER_ISOC:
                if (!qh->ntd)
                        skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
                dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);

                if (!chan->xfer_started) {
                        dwc2_update_frame_list(hsotg, qh, 1);

                        /*
                         * Always set to max, instead of actual size. Otherwise
                         * ntd will be changed with channel being enabled. Not
                         * recommended.
                         */
                        chan->ntd = dwc2_max_desc_num(qh);

                        /* Enable channel only once for ISOC */
                        dwc2_hc_start_transfer_ddma(hsotg, chan);
                }

                break;
        default:
                break;
        }
}

#define DWC2_CMPL_DONE		1
#define DWC2_CMPL_STOP		2
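
/*
 * Return-code convention for dwc2_cmpl_host_isoc_dma_desc() below: 0 means
 * keep scanning, DWC2_CMPL_DONE means a whole URB was completed and given
 * back, DWC2_CMPL_STOP means the IOC-marked descriptor was reached, and a
 * negative value aborts the scan (no URB, or the URB was dequeued from its
 * completion callback).
 */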

static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
                                        struct dwc2_host_chan *chan,
                                        struct dwc2_qtd *qtd,
                                        struct dwc2_qh *qh, u16 idx)
{
        struct dwc2_hcd_dma_desc *dma_desc;
        struct dwc2_hcd_iso_packet_desc *frame_desc;
        u16 remain = 0;
        int rc = 0;

        if (!qtd->urb)
                return -EINVAL;

        dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
                                sizeof(struct dwc2_hcd_dma_desc)),
                                sizeof(struct dwc2_hcd_dma_desc),
                                DMA_FROM_DEVICE);

        dma_desc = &qh->desc_list[idx];

        frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
        dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
        if (chan->ep_is_in)
                remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
                         HOST_DMA_ISOC_NBYTES_SHIFT;

        if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
                /*
                 * XactError, or unable to complete all the transactions
                 * in the scheduled micro-frame/frame, both indicated by
                 * HOST_DMA_STS_PKTERR
                 */
                qtd->urb->error_count++;
                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                frame_desc->status = -EPROTO;
        } else {
                /* Success */
                frame_desc->actual_length = qh->n_bytes[idx] - remain;
                frame_desc->status = 0;
        }

        if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
                /*
                 * urb->status is not used for isoc transfers here. The
                 * individual frame_desc statuses are used instead.
                 */
                dwc2_host_complete(hsotg, qtd, 0);
                dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);

                /*
                 * This check is necessary because urb_dequeue can be called
                 * from urb complete callback (sound driver for example). All
                 * pending URBs are dequeued there, so no need for further
                 * processing.
                 */
                if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
                        return -1;
                rc = DWC2_CMPL_DONE;
        }

        qh->ntd--;

        /* Stop if IOC requested descriptor reached */
        if (dma_desc->status & HOST_DMA_IOC)
                rc = DWC2_CMPL_STOP;

        return rc;
}

static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
                                         struct dwc2_host_chan *chan,
                                         enum dwc2_halt_status halt_status)
{
        struct dwc2_hcd_iso_packet_desc *frame_desc;
        struct dwc2_qtd *qtd, *qtd_tmp;
        struct dwc2_qh *qh;
        u16 idx;
        int rc;

        qh = chan->qh;
        idx = qh->td_first;

        if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
                list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
                        qtd->in_process = 0;
                return;
        }

        if (halt_status == DWC2_HC_XFER_AHB_ERR ||
            halt_status == DWC2_HC_XFER_BABBLE_ERR) {
                /*
                 * The channel is halted in these error cases, which are
                 * considered serious issues.
                 * Complete all URBs marking all frames as failed, irrespective
                 * of whether some of the descriptors (frames) succeeded.
                 * Pass the error code to the completion routine as well, to
                 * update urb->status; some class drivers might use it to stop
                 * queuing transfer requests.
                 */
                int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
                          -EIO : -EOVERFLOW;

                list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
                                         qtd_list_entry) {
                        if (qtd->urb) {
                                for (idx = 0; idx < qtd->urb->packet_count;
                                     idx++) {
                                        frame_desc = &qtd->urb->iso_descs[idx];
                                        frame_desc->status = err;
                                }

                                dwc2_host_complete(hsotg, qtd, err);
                        }

                        dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
                }

                return;
        }

        list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
                if (!qtd->in_process)
                        break;

                /*
                 * Ensure idx corresponds to descriptor where first urb of this
                 * qtd was added. In fact, during isoc desc init, dwc2 may skip
                 * an index if current frame number is already over this index.
                 */
                if (idx != qtd->isoc_td_first) {
                        dev_vdbg(hsotg->dev,
                                 "try to complete %d instead of %d\n",
                                 idx, qtd->isoc_td_first);
                        idx = qtd->isoc_td_first;
                }

                do {
                        struct dwc2_qtd *qtd_next;
                        u16 cur_idx;

                        rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
                                                          idx);
                        if (rc < 0)
                                return;
                        idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
                                                    chan->speed);
                        if (!rc)
                                continue;

                        if (rc == DWC2_CMPL_DONE)
                                break;

                        /* rc == DWC2_CMPL_STOP */

                        if (qh->host_interval >= 32)
                                goto stop_scan;

                        qh->td_first = idx;
                        cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
                        qtd_next = list_first_entry(&qh->qtd_list,
                                                    struct dwc2_qtd,
                                                    qtd_list_entry);
                        if (dwc2_frame_idx_num_gt(cur_idx,
                                                  qtd_next->isoc_td_last))
                                break;

                        goto stop_scan;

                } while (idx != qh->td_first);
        }

stop_scan:
        qh->td_first = idx;
}

static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
                                               struct dwc2_host_chan *chan,
                                               struct dwc2_qtd *qtd,
                                               struct dwc2_hcd_dma_desc *dma_desc,
                                               enum dwc2_halt_status halt_status,
                                               u32 n_bytes, int *xfer_done)
{
        struct dwc2_hcd_urb *urb = qtd->urb;
        u16 remain = 0;

        if (chan->ep_is_in)
                remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
                         HOST_DMA_NBYTES_SHIFT;

        dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);

        if (halt_status == DWC2_HC_XFER_AHB_ERR) {
                dev_err(hsotg->dev, "EIO\n");
                urb->status = -EIO;
                return 1;
        }

        if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
                switch (halt_status) {
                case DWC2_HC_XFER_STALL:
                        dev_vdbg(hsotg->dev, "Stall\n");
                        urb->status = -EPIPE;
                        break;
                case DWC2_HC_XFER_BABBLE_ERR:
                        dev_err(hsotg->dev, "Babble\n");
                        urb->status = -EOVERFLOW;
                        break;
                case DWC2_HC_XFER_XACT_ERR:
                        dev_err(hsotg->dev, "XactErr\n");
                        urb->status = -EPROTO;
                        break;
                default:
                        dev_err(hsotg->dev,
                                "%s: Unhandled descriptor error status (%d)\n",
                                __func__, halt_status);
                        break;
                }
                return 1;
        }

        if (dma_desc->status & HOST_DMA_A) {
                dev_vdbg(hsotg->dev,
                         "Active descriptor encountered on channel %d\n",
                         chan->hc_num);
                return 0;
        }

        if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
                if (qtd->control_phase == DWC2_CONTROL_DATA) {
                        urb->actual_length += n_bytes - remain;
                        if (remain || urb->actual_length >= urb->length) {
                                /*
                                 * For Control Data stage do not set urb->status
                                 * to 0, to prevent URB callback. Set it when
                                 * Status phase is done. See below.
                                 */
                                *xfer_done = 1;
                        }
                } else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
                /* No handling for SETUP stage */
        } else {
                /* BULK and INTR */
                urb->actual_length += n_bytes - remain;
                dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
                         urb->actual_length);
                if (remain || urb->actual_length >= urb->length) {
                        urb->status = 0;
                        *xfer_done = 1;
                }
        }

        return 0;
}
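
/*
 * A worked example for the IN path above: a BULK IN descriptor programmed
 * with n_bytes = 512 for which the device delivers only 200 bytes leaves
 * remain = 312 in the NBYTES field, so actual_length grows by
 * 512 - 312 = 200 and the nonzero remain (a short packet) marks the
 * transfer done.
 */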

static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
                                      struct dwc2_host_chan *chan,
                                      int chnum, struct dwc2_qtd *qtd,
                                      int desc_num,
                                      enum dwc2_halt_status halt_status,
                                      int *xfer_done)
{
        struct dwc2_qh *qh = chan->qh;
        struct dwc2_hcd_urb *urb = qtd->urb;
        struct dwc2_hcd_dma_desc *dma_desc;
        u32 n_bytes;
        int failed;

        dev_vdbg(hsotg->dev, "%s()\n", __func__);

        if (!urb)
                return -EINVAL;

        dma_sync_single_for_cpu(hsotg->dev,
                                qh->desc_list_dma + (desc_num *
                                sizeof(struct dwc2_hcd_dma_desc)),
                                sizeof(struct dwc2_hcd_dma_desc),
                                DMA_FROM_DEVICE);

        dma_desc = &qh->desc_list[desc_num];
        n_bytes = qh->n_bytes[desc_num];
        dev_vdbg(hsotg->dev,
                 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
                 qtd, urb, desc_num, dma_desc, n_bytes);
        failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
                                                     halt_status, n_bytes,
                                                     xfer_done);
        if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
                dwc2_host_complete(hsotg, qtd, urb->status);
                dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
                dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
                         failed, *xfer_done);
                return failed;
        }

        if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
                switch (qtd->control_phase) {
                case DWC2_CONTROL_SETUP:
                        if (urb->length > 0)
                                qtd->control_phase = DWC2_CONTROL_DATA;
                        else
                                qtd->control_phase = DWC2_CONTROL_STATUS;
                        dev_vdbg(hsotg->dev,
                                 "  Control setup transaction done\n");
                        break;
                case DWC2_CONTROL_DATA:
                        if (*xfer_done) {
                                qtd->control_phase = DWC2_CONTROL_STATUS;
                                dev_vdbg(hsotg->dev,
                                         "  Control data transfer done\n");
                        } else if (desc_num + 1 == qtd->n_desc) {
                                /*
                                 * Last descriptor for Control data stage which
                                 * is not completed yet
                                 */
                                dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
                                                          qtd);
                        }
                        break;
                default:
                        break;
                }
        }

        return 0;
}

static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
                                             struct dwc2_host_chan *chan,
                                             int chnum,
                                             enum dwc2_halt_status halt_status)
{
        struct list_head *qtd_item, *qtd_tmp;
        struct dwc2_qh *qh = chan->qh;
        struct dwc2_qtd *qtd = NULL;
        int xfer_done;
        int desc_num = 0;

        if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
                list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
                        qtd->in_process = 0;
                return;
        }

        list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
                int i;
                int qtd_desc_count;

                qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
                xfer_done = 0;
                qtd_desc_count = qtd->n_desc;

                for (i = 0; i < qtd_desc_count; i++) {
                        if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
                                                       desc_num, halt_status,
                                                       &xfer_done)) {
                                qtd = NULL;
                                goto stop_scan;
                        }

                        desc_num++;
                }
        }

stop_scan:
        if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
                /*
                 * Resetting the data toggle for bulk and interrupt endpoints
                 * in case of stall. See handle_hc_stall_intr().
                 */
                if (halt_status == DWC2_HC_XFER_STALL)
                        qh->data_toggle = DWC2_HC_PID_DATA0;
                else
                        dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
        }

        if (halt_status == DWC2_HC_XFER_COMPLETE) {
                if (chan->hcint & HCINTMSK_NYET) {
                        /*
                         * Got a NYET on the last transaction of the transfer.
                         * It means that the endpoint should be in the PING
                         * state at the beginning of the next transfer.
                         */
                        qh->ping_state = 1;
                }
        }
}

/**
 * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates URB's
 * status and calls completion routine for the URB if it's done. Called from
 * interrupt handlers.
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted or just XferComplete
 *               for isochronous transfers
 *
 * Releases the channel to be used by other transfers.
 * In case of Isochronous endpoint the channel is not halted until the end of
 * the session, i.e. QTD list is empty.
 * If periodic channel released the FrameList is updated accordingly.
 * Calls transaction selection routines to activate pending transfers.
 */
void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
                                 struct dwc2_host_chan *chan, int chnum,
                                 enum dwc2_halt_status halt_status)
{
        struct dwc2_qh *qh = chan->qh;
        int continue_isoc_xfer = 0;
        enum dwc2_transaction_type tr_type;

        if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
                dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);

                /* Release the channel if halted or session completed */
                if (halt_status != DWC2_HC_XFER_COMPLETE ||
                    list_empty(&qh->qtd_list)) {
                        struct dwc2_qtd *qtd, *qtd_tmp;

                        /*
                         * Kill all remaining QTDs since channel has been
                         * halted.
                         */
                        list_for_each_entry_safe(qtd, qtd_tmp,
                                                 &qh->qtd_list,
                                                 qtd_list_entry) {
                                dwc2_host_complete(hsotg, qtd,
                                                   -ECONNRESET);
                                dwc2_hcd_qtd_unlink_and_free(hsotg,
                                                             qtd, qh);
                        }

                        /* Halt the channel if session completed */
                        if (halt_status == DWC2_HC_XFER_COMPLETE)
                                dwc2_hc_halt(hsotg, chan, halt_status);
                        dwc2_release_channel_ddma(hsotg, qh);
                        dwc2_hcd_qh_unlink(hsotg, qh);
                } else {
                        /* Keep in assigned schedule to continue transfer */
                        list_move_tail(&qh->qh_list_entry,
                                       &hsotg->periodic_sched_assigned);
                        /*
                         * If channel has been halted during giveback of urb
                         * then prevent any new scheduling.
                         */
                        if (!chan->halt_status)
                                continue_isoc_xfer = 1;
                }
                /*
                 * Todo: Consider the case when period exceeds FrameList size.
                 * Frame Rollover interrupt should be used.
                 */
        } else {
                /*
                 * Scan descriptor list to complete the URB(s), then release
                 * the channel
                 */
                dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
                                                 halt_status);
                dwc2_release_channel_ddma(hsotg, qh);
                dwc2_hcd_qh_unlink(hsotg, qh);

                if (!list_empty(&qh->qtd_list)) {
                        /*
                         * Add back to inactive non-periodic schedule on normal
                         * completion
                         */
                        dwc2_hcd_qh_add(hsotg, qh);
                }
        }

        tr_type = dwc2_hcd_select_transactions(hsotg);
        if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
                if (continue_isoc_xfer) {
                        if (tr_type == DWC2_TRANSACTION_NONE)
                                tr_type = DWC2_TRANSACTION_PERIODIC;
                        else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
                                tr_type = DWC2_TRANSACTION_ALL;
                }
                dwc2_hcd_queue_transactions(hsotg, tr_type);
        }
}