/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"
/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 */
#define SCHEDULE_SLOP 10
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                         struct dwc2_hcd_urb *urb)
{
        int dev_speed, hub_addr, hub_port;
        char *speed, *type;

        dev_vdbg(hsotg->dev, "%s()\n", __func__);

        /* Initialize QH */
        qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
        qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

        qh->data_toggle = DWC2_HC_PID_DATA0;
        qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
        INIT_LIST_HEAD(&qh->qtd_list);
        INIT_LIST_HEAD(&qh->qh_list_entry);

        /* FS/LS Endpoint on HS Hub, NOT virtual root hub */
        dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

        dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

        if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
            hub_addr != 0 && hub_addr != 1) {
                dev_vdbg(hsotg->dev,
                         "QH init: EP %d: TT found at hub addr %d, for port %d\n",
                         dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
                         hub_port);
                qh->do_split = 1;
        }
        if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
            qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
                /* Compute scheduling parameters once and save them */
                u32 hprt, prtspd;

                /* Todo: Account for split transfers in the bus time */
                int bytecount =
                        dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

                qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ?
                                USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
                                qh->ep_type == USB_ENDPOINT_XFER_ISOC,
                                bytecount));

                /* Ensure frame_number corresponds to the reality */
                hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
                /* Start in a slightly future (micro)frame */
                qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
                                                     SCHEDULE_SLOP);
                qh->interval = urb->interval;
#if 0
                /* Increase interrupt polling rate for debugging */
                if (qh->ep_type == USB_ENDPOINT_XFER_INT)
                        qh->interval = 8;
#endif
                hprt = dwc2_readl(hsotg->regs + HPRT0);
                prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
                if (prtspd == HPRT0_SPD_HIGH_SPEED &&
                    (dev_speed == USB_SPEED_LOW ||
                     dev_speed == USB_SPEED_FULL)) {
                        qh->interval *= 8;
                        qh->sched_frame |= 0x7;
                        qh->start_split_frame = qh->sched_frame;
                }
                dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
        }
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
                 dwc2_hcd_get_dev_addr(&urb->pipe_info));
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
                 dwc2_hcd_get_ep_num(&urb->pipe_info),
                 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

        qh->dev_speed = dev_speed;

        switch (dev_speed) {
        case USB_SPEED_LOW:
                speed = "low";
                break;
        case USB_SPEED_FULL:
                speed = "full";
                break;
        case USB_SPEED_HIGH:
                speed = "high";
                break;
        default:
                speed = "?";
                break;
        }
        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);
        switch (qh->ep_type) {
        case USB_ENDPOINT_XFER_ISOC:
                type = "isochronous";
                break;
        case USB_ENDPOINT_XFER_INT:
                type = "interrupt";
                break;
        case USB_ENDPOINT_XFER_CONTROL:
                type = "control";
                break;
        case USB_ENDPOINT_XFER_BULK:
                type = "bulk";
                break;
        default:
                type = "?";
                break;
        }

        dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

        if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
                dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
                         qh->usecs);
                dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
                         qh->interval);
        }
}
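/*
 * Scheduling note: for interrupt/isochronous endpoints the fields set up
 * above (qh->usecs, qh->interval, qh->sched_frame and, for split transfers,
 * qh->start_split_frame) are the scheduling state used by the rest of this
 * file.  For a FS/LS device behind a high-speed root port the interval is
 * converted to microframes (interval *= 8) and sched_frame is aligned to
 * the end of a full frame (|= 0x7), so the code below can work purely in
 * microframe units.
 */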
/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @urb:       Holds the information about the device/endpoint needed
 *             to initialize the QH
 * @mem_flags: Flags for allocating memory
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
                                   struct dwc2_hcd_urb *urb,
                                   gfp_t mem_flags)
{
        struct dwc2_qh *qh;

        if (!urb->priv)
                return NULL;

        /* Allocate memory */
        qh = kzalloc(sizeof(*qh), mem_flags);
        if (!qh)
                return NULL;

        dwc2_qh_init(hsotg, qh, urb);

        if (hsotg->core_params->dma_desc_enable > 0 &&
            dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
                dwc2_hcd_qh_free(hsotg, qh);
                return NULL;
        }

        return qh;
}
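/*
 * A minimal usage sketch (typically the URB enqueue path in hcd.c; the
 * local names below are illustrative only):
 *
 *      qh = dwc2_hcd_qh_create(hsotg, dwc2_urb, mem_flags);
 *      if (!qh)
 *              return -ENOMEM;
 *
 * mem_flags is expected to be GFP_ATOMIC when called from atomic context,
 * since kzalloc() may otherwise sleep.
 */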
/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * The QH should already be removed from a schedule list. The QTD list should
 * already be empty if called from URB Dequeue.
 *
 * Must NOT be called with interrupts disabled or a spinlock held.
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        if (hsotg->core_params->dma_desc_enable > 0) {
                dwc2_hcd_qh_free_ddma(hsotg, qh);
        } else {
                /* kfree(NULL) is safe */
                kfree(qh->dw_align_buf);
                qh->dw_align_buf_dma = (dma_addr_t)0;
        }

        kfree(qh);
}
/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
        /*
         * Currently assuming that there is a dedicated host channel for
         * each periodic transaction plus at least one host channel for
         * non-periodic transactions
         */
        int status;
        int num_channels;

        num_channels = hsotg->core_params->host_channels;
        if (hsotg->periodic_channels + hsotg->non_periodic_channels <
                                                                num_channels
            && hsotg->periodic_channels < num_channels - 1) {
                status = 0;
        } else {
                dev_dbg(hsotg->dev,
                        "%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
                        __func__, num_channels, hsotg->periodic_channels,
                        hsotg->non_periodic_channels);
                status = -ENOSPC;
        }

        return status;
}
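/*
 * The channel reservation implied by this check is only used when the
 * microframe scheduler is disabled (uframe_sched <= 0): in that case
 * dwc2_schedule_periodic() increments hsotg->periodic_channels and
 * dwc2_deschedule_periodic() releases it again.
 */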
/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
                                         struct dwc2_qh *qh)
{
        int status;
        s16 max_claimed_usecs;

        status = 0;

        if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
                /*
                 * High speed mode
                 * Max periodic usecs is 80% x 125 usec = 100 usec
                 */
                max_claimed_usecs = 100 - qh->usecs;
        } else {
                /*
                 * Full speed mode
                 * Max periodic usecs is 90% x 1000 usec = 900 usec
                 */
                max_claimed_usecs = 900 - qh->usecs;
        }

        if (hsotg->periodic_usecs > max_claimed_usecs) {
                dev_err(hsotg->dev,
                        "%s: already claimed usecs %d, required usecs %d\n",
                        __func__, hsotg->periodic_usecs, qh->usecs);
                status = -ENOSPC;
        }

        return status;
}
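/*
 * Worked example of the check above: a high-speed interrupt endpoint with
 * qh->usecs = 20 gives max_claimed_usecs = 100 - 20 = 80, so the QH can
 * only be scheduled while hsotg->periodic_usecs (the total already claimed
 * by other periodic QHs) is at most 80 us of the 125 us microframe.
 */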
/*
 * Microframe scheduler
 *
 * The total time in use per (micro)frame is tracked in hsotg->frame_usecs[]
 * and each QH's share is kept in qh->frame_usecs[]; when a QH is descheduled
 * its time is donated back to hsotg->frame_usecs[].
 */
static const unsigned short max_uframe_usecs[] = {
        100, 100, 100, 100, 100, 100, 30, 0
};
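/*
 * Per-microframe periodic budget used by the allocators below: microframes
 * 0-5 may each be filled with up to 100 us of periodic traffic (80% of
 * 125 us, matching dwc2_check_periodic_bandwidth()), microframe 6 with at
 * most 30 us, and no periodic time may be claimed in microframe 7.
 */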
void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
        int i;

        for (i = 0; i < 8; i++)
                hsotg->frame_usecs[i] = max_uframe_usecs[i];
}
static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        unsigned short utime = qh->usecs;
        int i;

        for (i = 0; i < 8; i++) {
                /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
                if (utime <= hsotg->frame_usecs[i]) {
                        hsotg->frame_usecs[i] -= utime;
                        qh->frame_usecs[i] += utime;
                        return i;
                }
        }
        return -ENOSPC;
}
/*
 * Use this for full-speed transfers that may span multiple microframes
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        unsigned short utime = qh->usecs;
        unsigned short xtime;
        int t_left;
        int i;
        int j;
        int k;

        for (i = 0; i < 8; i++) {
                if (hsotg->frame_usecs[i] <= 0)
                        continue;

                /*
                 * we need n consecutive slots so use j as a start slot
                 * j plus j+1 must be enough time (for now)
                 */
                xtime = hsotg->frame_usecs[i];
                for (j = i + 1; j < 8; j++) {
                        /*
                         * if we add this frame remaining time to xtime we may
                         * be OK, if not we need to test j for a complete frame
                         */
                        if (xtime + hsotg->frame_usecs[j] < utime) {
                                if (hsotg->frame_usecs[j] <
                                                        max_uframe_usecs[j])
                                        continue;
                        }
                        if (xtime >= utime) {
                                t_left = utime;
                                for (k = i; k < 8; k++) {
                                        t_left -= hsotg->frame_usecs[k];
                                        if (t_left <= 0) {
                                                qh->frame_usecs[k] +=
                                                        hsotg->frame_usecs[k]
                                                                + t_left;
                                                hsotg->frame_usecs[k] = -t_left;
                                                return i;
                                        } else {
                                                qh->frame_usecs[k] +=
                                                        hsotg->frame_usecs[k];
                                                hsotg->frame_usecs[k] = 0;
                                        }
                                }
                        }
                        /* add the frame time to x time */
                        xtime += hsotg->frame_usecs[j];
                        /* we must have a fully available next frame or break */
                        if (xtime < utime &&
                            hsotg->frame_usecs[j] == max_uframe_usecs[j])
                                continue;
                }
        }
        return -ENOSPC;
}
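/*
 * Worked example: a full-speed transfer needing utime = 150 us, with all
 * microframes still at their maximums, is placed starting at microframe 0.
 * The inner bookkeeping loop consumes the full 100 us of microframe 0 and
 * 50 us of microframe 1 (leaving hsotg->frame_usecs[1] = 50), records the
 * usage in qh->frame_usecs[], and returns 0.
 */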
static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        int ret;

        if (qh->dev_speed == USB_SPEED_HIGH) {
                /* if this is a hs transaction we need a full frame */
                ret = dwc2_find_single_uframe(hsotg, qh);
        } else {
                /*
                 * if this is a fs transaction we may need a sequence
                 * of frames
                 */
                ret = dwc2_find_multi_uframe(hsotg, qh);
        }
        return ret;
}
/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
                                    struct dwc2_qh *qh)
{
        u32 max_xfer_size;
        u32 max_channel_xfer_size;
        int status = 0;

        max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
        max_channel_xfer_size = hsotg->core_params->max_transfer_size;

        if (max_xfer_size > max_channel_xfer_size) {
                dev_err(hsotg->dev,
                        "%s: Periodic xfer length %d > max xfer length for channel %d\n",
                        __func__, max_xfer_size, max_channel_xfer_size);
                status = -ENOSPC;
        }

        return status;
}
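/*
 * Example: a high-bandwidth high-speed isochronous endpoint advertising
 * 3 transactions of 1024 bytes per microframe yields max_xfer_size =
 * 1024 * 3 = 3072 bytes, which must fit within the core's configured
 * max_transfer_size for a single host channel.
 */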
/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        int status;

        if (hsotg->core_params->uframe_sched > 0) {
                int frame = -1;

                status = dwc2_find_uframe(hsotg, qh);
                if (status == 0)
                        frame = 7;
                else if (status > 0)
                        frame = status - 1;

                /* Set the new frame up */
                if (frame >= 0) {
                        qh->sched_frame &= ~0x7;
                        qh->sched_frame |= (frame & 7);
                }

                if (status > 0)
                        status = 0;
        } else {
                status = dwc2_periodic_channel_available(hsotg);
                if (status) {
                        dev_info(hsotg->dev,
                                 "%s: No host channel available for periodic transfer\n",
                                 __func__);
                        return status;
                }

                status = dwc2_check_periodic_bandwidth(hsotg, qh);
        }

        if (status) {
                dev_dbg(hsotg->dev,
                        "%s: Insufficient periodic bandwidth for periodic transfer\n",
                        __func__);
                return status;
        }

        status = dwc2_check_max_xfer_size(hsotg, qh);
        if (status) {
                dev_dbg(hsotg->dev,
                        "%s: Channel max transfer size too small for periodic transfer\n",
                        __func__);
                return status;
        }

        if (hsotg->core_params->dma_desc_enable > 0)
                /* Don't rely on SOF and start in ready schedule */
                list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
        else
                /* Always start in inactive schedule */
                list_add_tail(&qh->qh_list_entry,
                              &hsotg->periodic_sched_inactive);

        if (hsotg->core_params->uframe_sched <= 0)
                /* Reserve periodic channel */
                hsotg->periodic_channels++;

        /* Update claimed usecs per (micro)frame */
        hsotg->periodic_usecs += qh->usecs;

        return status;
}
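/*
 * Note on the two paths above: with the microframe scheduler enabled
 * (uframe_sched > 0), dwc2_find_uframe() both checks for and reserves
 * per-microframe time, and the chosen microframe is encoded in the low
 * three bits of qh->sched_frame.  With it disabled, only the coarse
 * channel-count and aggregate-bandwidth checks are performed and a host
 * channel is reserved for the QH instead.
 */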
/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
                                     struct dwc2_qh *qh)
{
        int i;

        list_del_init(&qh->qh_list_entry);

        /* Update claimed usecs per (micro)frame */
        hsotg->periodic_usecs -= qh->usecs;

        if (hsotg->core_params->uframe_sched > 0) {
                for (i = 0; i < 8; i++) {
                        hsotg->frame_usecs[i] += qh->frame_usecs[i];
                        qh->frame_usecs[i] = 0;
                }
        } else {
                /* Release periodic channel reservation */
                hsotg->periodic_channels--;
        }
}
/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        int status;
        u32 intr_mask;

        if (dbg_qh(qh))
                dev_vdbg(hsotg->dev, "%s()\n", __func__);

        if (!list_empty(&qh->qh_list_entry))
                /* QH already in a schedule */
                return 0;

        if (!dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number) &&
            !hsotg->frame_number) {
                dev_dbg(hsotg->dev, "reset frame number counter\n");
                qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
                                                     SCHEDULE_SLOP);
        }

        /* Add the new QH to the appropriate schedule */
        if (dwc2_qh_is_non_per(qh)) {
                /* Always start in inactive schedule */
                list_add_tail(&qh->qh_list_entry,
                              &hsotg->non_periodic_sched_inactive);
                return 0;
        }

        status = dwc2_schedule_periodic(hsotg, qh);
        if (status)
                return status;
        if (!hsotg->periodic_qh_count) {
                intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
                intr_mask |= GINTSTS_SOF;
                dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
        }
        hsotg->periodic_qh_count++;

        return 0;
}
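/*
 * The SOF interrupt is only needed while periodic QHs are scheduled, so it
 * is unmasked here when periodic_qh_count goes from 0 to 1 and masked again
 * in dwc2_hcd_qh_unlink() when the count drops back to 0.
 */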
/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
        u32 intr_mask;

        dev_vdbg(hsotg->dev, "%s()\n", __func__);

        if (list_empty(&qh->qh_list_entry))
                /* QH is not in a schedule */
                return;

        if (dwc2_qh_is_non_per(qh)) {
                if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
                        hsotg->non_periodic_qh_ptr =
                                        hsotg->non_periodic_qh_ptr->next;
                list_del_init(&qh->qh_list_entry);
                return;
        }

        dwc2_deschedule_periodic(hsotg, qh);
        hsotg->periodic_qh_count--;
        if (!hsotg->periodic_qh_count) {
                intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
                intr_mask &= ~GINTSTS_SOF;
                dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
        }
}
/*
 * Schedule the next continuing periodic split transfer
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
                                      struct dwc2_qh *qh, u16 frame_number,
                                      int sched_next_periodic_split)
{
        u16 incr;

        if (sched_next_periodic_split) {
                qh->sched_frame = frame_number;
                incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
                if (dwc2_frame_num_le(frame_number, incr)) {
                        /*
                         * Allow one frame to elapse after start split
                         * microframe before scheduling complete split, but
                         * DON'T if we are doing the next start split in the
                         * same frame for an ISOC out
                         */
                        if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
                            qh->ep_is_in != 0) {
                                qh->sched_frame =
                                        dwc2_frame_num_inc(qh->sched_frame, 1);
                        }
                }
        } else {
                qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
                                                     qh->interval);
                if (dwc2_frame_num_le(qh->sched_frame, frame_number))
                        qh->sched_frame = frame_number;
                qh->sched_frame |= 0x7;
                qh->start_split_frame = qh->sched_frame;
        }
}
/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
                            int sched_next_periodic_split)
{
        u16 frame_number;

        if (dbg_qh(qh))
                dev_vdbg(hsotg->dev, "%s()\n", __func__);

        if (dwc2_qh_is_non_per(qh)) {
                dwc2_hcd_qh_unlink(hsotg, qh);
                if (!list_empty(&qh->qtd_list))
                        /* Add back to inactive non-periodic schedule */
                        dwc2_hcd_qh_add(hsotg, qh);
                return;
        }

        frame_number = dwc2_hcd_get_frame_number(hsotg);

        if (qh->do_split) {
                dwc2_sched_periodic_split(hsotg, qh, frame_number,
                                          sched_next_periodic_split);
        } else {
                qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
                                                     qh->interval);
                if (dwc2_frame_num_le(qh->sched_frame, frame_number))
                        qh->sched_frame = frame_number;
        }

        if (list_empty(&qh->qtd_list)) {
                dwc2_hcd_qh_unlink(hsotg, qh);
                return;
        }
        /*
         * Remove from periodic_sched_queued and move to
         * appropriate queue
         */
        if ((hsotg->core_params->uframe_sched > 0 &&
             dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
            (hsotg->core_params->uframe_sched <= 0 &&
             qh->sched_frame == frame_number))
                list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
        else
                list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}
/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
        qtd->urb = urb;
        if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
                        USB_ENDPOINT_XFER_CONTROL) {
                /*
                 * The only time the QTD data toggle is used is on the data
                 * phase of control transfers. This phase always starts with
                 * DATA1.
                 */
                qtd->data_toggle = DWC2_HC_PID_DATA1;
                qtd->control_phase = DWC2_CONTROL_SETUP;
        }

        /* Start split */
        qtd->complete_split = 0;
        qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
        qtd->isoc_split_offset = 0;
        qtd->in_process = 0;

        /* Store the qtd ptr in the urb to reference the QTD */
        urb->qtd = qtd;
}
/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *                      Caller must hold driver lock.
 *
 * @hsotg: The DWC HCD structure
 * @qtd:   The QTD to add
 * @qh:    Queue head to add qtd to
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * If the QH to which the QTD is added is not currently scheduled, it is placed
 * into the proper schedule based on its EP type.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
                     struct dwc2_qh *qh)
{
        int retval;

        if (unlikely(!qh)) {
                dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
                retval = -EINVAL;
                goto fail;
        }

        retval = dwc2_hcd_qh_add(hsotg, qh);
        if (retval)
                goto fail;

        qtd->qh = qh;
        list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);

        return 0;
fail:
        return retval;
}
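/*
 * Rough sketch of how the QTD helpers above are used from the URB enqueue
 * path (illustrative only; the locking and error handling of the real
 * caller in hcd.c are omitted):
 *
 *      qtd = kzalloc(sizeof(*qtd), mem_flags);
 *      if (!qtd)
 *              return -ENOMEM;
 *      dwc2_hcd_qtd_init(qtd, dwc2_urb);
 *      retval = dwc2_hcd_qtd_add(hsotg, qtd, qh);
 *      if (retval)
 *              kfree(qtd);
 */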