/*
 * Copyright (C) 2010 Patrick Georgi
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

//#define XHCI_SPEW_DEBUG

#include <inttypes.h>
#include <arch/virtual.h>
#include "xhci_private.h"
#include "xhci.h"

static void xhci_start(hci_t *controller);
static void xhci_stop(hci_t *controller);
static void xhci_reset(hci_t *controller);
static void xhci_reinit(hci_t *controller);
static void xhci_shutdown(hci_t *controller);
static int xhci_bulk(endpoint_t *ep, int size, u8 *data, int finalize);
static int xhci_control(usbdev_t *dev, direction_t dir, int drlen, void *devreq,
			int dalen, u8 *data);
static void *xhci_create_intr_queue(endpoint_t *ep, int reqsize, int reqcount,
				    int reqtiming);
static void xhci_destroy_intr_queue(endpoint_t *ep, void *queue);
static u8 *xhci_poll_intr_queue(void *queue);

/*
 * Some structures must not cross page boundaries. To achieve this,
 * we align them by their size (or the next greater power of 2).
 */
void *
xhci_align(const size_t min_align, const size_t size)
{
	size_t align;
	if (!(size & (size - 1)))
		align = size; /* It's a power of 2 */
	else
		align = 1 << ((sizeof(unsigned) << 3) - __builtin_clz(size));
	if (align < min_align)
		align = min_align;
	xhci_spew("Aligning %zu to %zu\n", size, align);
	return dma_memalign(align, size);
}
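
/*
 * Note: writing !pcs into the control word leaves the TRB's cycle bit
 * unequal to the producer cycle state, so the controller treats the TRB
 * as empty until xhci_enqueue_trb() flips the bit (cf. xHCI spec 4.9.2).
 */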
void
xhci_clear_trb(trb_t *const trb, const int pcs)
{
	trb->ptr_low = 0;
	trb->ptr_high = 0;
	trb->status = 0;
	trb->control = !pcs;
}
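
/*
 * The ring consists of a single segment: the last entry is a Link TRB with
 * the Toggle Cycle flag set that points back to the start of the ring, so
 * the producer cycle state flips on every wrap-around.
 */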
void
xhci_init_cycle_ring(transfer_ring_t *const tr, const size_t ring_size)
{
	memset((void *)tr->ring, 0, ring_size * sizeof(*tr->ring));
	TRB_SET(TT, &tr->ring[ring_size - 1], TRB_LINK);
	TRB_SET(TC, &tr->ring[ring_size - 1], 1);
	/* only one segment that points to itself */
	tr->ring[ring_size - 1].ptr_low = virt_to_phys(tr->ring);

	tr->pcs = 1;
	tr->cur = tr->ring;
}

/* On Panther Point: switch ports shared with EHCI to xHCI */
#if CONFIG(LP_USB_PCI)
static void
xhci_switch_ppt_ports(pcidev_t addr)
{
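	/*
	 * The config registers touched below match Intel's 7 Series PCH
	 * documentation: 0xd0 XUSB2PR (USB2 port routing), 0xd4 XUSB2PRM
	 * (routing mask), 0xd8 USB3_PSSEN (SuperSpeed enable) and 0xdc
	 * USB3PRM (SuperSpeed capability mask).
	 */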
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xdc) & 0xf;
		xhci_debug("Ports capable of SuperSpeed: 0x%"PRIx32"\n", reg32);

		/* For now, do not enable SuperSpeed on any ports */
		//pci_write_config32(addr, 0xd8, reg32);
		pci_write_config32(addr, 0xd8, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd8) & 0xf;
		xhci_debug("Configured for SuperSpeed: 0x%"PRIx32"\n", reg32);

		reg32 = pci_read_config32(addr, 0xd4) & 0xf;
		xhci_debug("Trying to switch over: 0x%"PRIx32"\n", reg32);

		pci_write_config32(addr, 0xd0, reg32);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Actually switched over: 0x%"PRIx32"\n", reg32);
	}
}
#endif

#if CONFIG(LP_USB_PCI)
/* On Panther Point: switch all ports back to EHCI */
static void
xhci_switchback_ppt_ports(pcidev_t addr)
{
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Switching ports back: 0x%"PRIx32"\n", reg32);
		pci_write_config32(addr, 0xd0, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Still switched to xHCI: 0x%"PRIx32"\n", reg32);
	}
}
#endif
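
/*
 * Poll *reg until the bits in 'mask' read as 'wait_for', or until timeout_us
 * microseconds have passed. Returns the remaining budget: non-zero on
 * success, 0 on timeout.
 */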
static long
xhci_handshake(volatile u32 *const reg, u32 mask, u32 wait_for, long timeout_us)
{
	if (timeout_us <= 0)
		return 0;
	while ((*reg & mask) != wait_for && timeout_us != 0) {
		--timeout_us;
		udelay(1);
	}
	return timeout_us;
}
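
/*
 * USBSTS.CNR (Controller Not Ready) must read 0 before software may write
 * any doorbell or operational register (xHCI spec 5.4.1).
 */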
static int
xhci_wait_ready(xhci_t *const xhci)
{
	xhci_debug("Waiting for controller to be ready... ");
	if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_CNR, 0, 100000L)) {
		usb_debug("timeout!\n");
		return -1;
	}
	usb_debug("ok.\n");
	return 0;
}

hci_t *
xhci_init(unsigned long physical_bar)
{
	int i;

	/* First, allocate and initialize static controller structures */

	hci_t *const controller = new_controller();
	controller->type = XHCI;
	controller->start = xhci_start;
	controller->stop = xhci_stop;
	controller->reset = xhci_reset;
	controller->init = xhci_reinit;
	controller->shutdown = xhci_shutdown;
	controller->bulk = xhci_bulk;
	controller->control = xhci_control;
	controller->set_address = xhci_set_address;
	controller->finish_device_config = xhci_finish_device_config;
	controller->destroy_device = xhci_destroy_dev;
	controller->create_intr_queue = xhci_create_intr_queue;
	controller->destroy_intr_queue = xhci_destroy_intr_queue;
	controller->poll_intr_queue = xhci_poll_intr_queue;
	controller->pcidev = 0;

	controller->reg_base = (uintptr_t)physical_bar;
	controller->instance = xzalloc(sizeof(xhci_t));
	xhci_t *const xhci = (xhci_t *)controller->instance;

	init_device_entry(controller, 0);
	xhci->roothub = controller->devices[0];
	xhci->cr.ring = xhci_align(64, COMMAND_RING_SIZE * sizeof(trb_t));
	xhci->er.ring = xhci_align(64, EVENT_RING_SIZE * sizeof(trb_t));
	xhci->ev_ring_table = xhci_align(64, sizeof(erst_entry_t));
	if (!xhci->roothub || !xhci->cr.ring ||
	    !xhci->er.ring || !xhci->ev_ring_table) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}
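
	/*
	 * The xHC's MMIO space holds four register sets: capability registers
	 * at the BAR, operational registers at CAPLENGTH, runtime registers
	 * at RTSOFF and the doorbell array at DBOFF (xHCI spec 5.1).
	 */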
	xhci->capreg = phys_to_virt(physical_bar);
	xhci->opreg = phys_to_virt(physical_bar) + CAP_GET(CAPLEN, xhci->capreg);
	xhci->hcrreg = phys_to_virt(physical_bar) + xhci->capreg->rtsoff;
	xhci->dbreg = phys_to_virt(physical_bar) + xhci->capreg->dboff;
197 xhci_debug("regbase: 0x%"PRIxPTR"\n", physical_bar);
198 xhci_debug("caplen: 0x%"PRIx32"\n", CAP_GET(CAPLEN, xhci->capreg));
199 xhci_debug("rtsoff: 0x%"PRIx32"\n", xhci->capreg->rtsoff);
200 xhci_debug("dboff: 0x%"PRIx32"\n", xhci->capreg->dboff);
202 xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
203 CAP_GET(CAPVER_HI, xhci->capreg), CAP_GET(CAPVER_LO, xhci->capreg));
204 if ((CAP_GET(CAPVER, xhci->capreg) < 0x96) ||
205 (CAP_GET(CAPVER, xhci->capreg) > 0x120)) {
206 xhci_debug("Unsupported xHCI version\n");
207 goto _free_xhci;
210 xhci_debug("context size: %dB\n", CTXSIZE(xhci));
211 xhci_debug("maxslots: 0x%02"PRIx32"\n", CAP_GET(MAXSLOTS, xhci->capreg));
212 xhci_debug("maxports: 0x%02"PRIx32"\n", CAP_GET(MAXPORTS, xhci->capreg));
213 const unsigned pagesize = xhci->opreg->pagesize << 12;
214 xhci_debug("pagesize: 0x%04x\n", pagesize);

	/*
	 * We haven't touched the hardware yet. So we allocate all dynamic
	 * structures at first and can still chicken out easily if we run out
	 * of memory.
	 */
	xhci->max_slots_en = CAP_GET(MAXSLOTS, xhci->capreg) &
		CONFIG_LP_MASK_MaxSlotsEn;
	xhci->dcbaa = xhci_align(64, (xhci->max_slots_en + 1) * sizeof(u64));
	xhci->dev = malloc((xhci->max_slots_en + 1) * sizeof(*xhci->dev));
	if (!xhci->dcbaa || !xhci->dev) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}
	memset(xhci->dcbaa, 0x00, (xhci->max_slots_en + 1) * sizeof(u64));
	memset(xhci->dev, 0x00, (xhci->max_slots_en + 1) * sizeof(*xhci->dev));

	/*
	 * Let dcbaa[0] point to another array of pointers, sp_ptrs.
	 * The pointers therein point to scratchpad buffers (pages).
	 */
	const size_t max_sp_bufs =
		CAP_GET(MAX_SCRATCH_BUFS_HI, xhci->capreg) << 5 |
		CAP_GET(MAX_SCRATCH_BUFS_LO, xhci->capreg);
	xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs);
	if (max_sp_bufs) {
		const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64);
		xhci->sp_ptrs = xhci_align(64, sp_ptrs_size);
		if (!xhci->sp_ptrs) {
			xhci_debug("Out of memory\n");
			goto _free_xhci_structs;
		}
		memset(xhci->sp_ptrs, 0x00, sp_ptrs_size);
		for (i = 0; i < max_sp_bufs; ++i) {
			/* Could use mmap() here if we had it.
			   Maybe there is another way. */
			void *const page = memalign(pagesize, pagesize);
			if (!page) {
				xhci_debug("Out of memory\n");
				goto _free_xhci_structs;
			}
			xhci->sp_ptrs[i] = virt_to_phys(page);
		}
		xhci->dcbaa[0] = virt_to_phys(xhci->sp_ptrs);
	}

	if (dma_initialized()) {
		xhci->dma_buffer = dma_memalign(64 * 1024, DMA_SIZE);
		if (!xhci->dma_buffer) {
			xhci_debug("Not enough memory for DMA bounce buffer\n");
			goto _free_xhci_structs;
		}
	}

	/* Now start working on the hardware */
	if (xhci_wait_ready(xhci))
		goto _free_xhci_structs;

	/* TODO: Check if BIOS claims ownership (and hand over) */

	xhci_reset(controller);
	xhci_reinit(controller);

	xhci->roothub->controller = controller;
	xhci->roothub->init = xhci_rh_init;
	xhci->roothub->init(xhci->roothub);

	return controller;
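
	/*
	 * Error paths: release everything in reverse order of allocation.
	 * sp_ptrs[] holds physical addresses, hence the phys_to_virt()
	 * before each free().
	 */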
_free_xhci_structs:
	free(xhci->dma_buffer);
	if (xhci->sp_ptrs) {
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dcbaa);
_free_xhci:
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci->roothub);
	free(xhci->dev);
	free(xhci);
	/* _free_controller: */
	detach_controller(controller);
	free(controller);
	return NULL;
}

#if CONFIG(LP_USB_PCI)
hci_t *
xhci_pci_init(pcidev_t addr)
{
	u32 reg_addr;
	hci_t *controller;

	reg_addr = pci_read_config32(addr, PCI_BASE_ADDRESS_0) &
		   PCI_BASE_ADDRESS_MEM_MASK;
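	/*
	 * For a 64-bit memory BAR, BAR1 holds the upper 32 address bits;
	 * anything non-zero there would put the registers out of our reach.
	 */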
	if (pci_read_config32(addr, PCI_BASE_ADDRESS_1) > 0)
		fatal("We don't do 64bit addressing.\n");

	controller = xhci_init((unsigned long)reg_addr);
	if (controller) {
		xhci_t *xhci = controller->instance;
		controller->pcidev = addr;

		xhci_switch_ppt_ports(addr);

		/* Set up any quirks for controller root hub */
		xhci->roothub->quirks = pci_quirk_check(addr);
	}

	return controller;
}
#endif
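
/*
 * Typical bring-up from a payload, as a sketch (the device address below is
 * only an example; usb_poll() is libpayload's USB core poll routine, and on
 * most boards controllers are found by the generic usb_initialize() scan
 * instead of being initialized by hand):
 *
 *	hci_t *const hci = xhci_pci_init(PCI_DEV(0, 0x14, 0));
 *	if (hci)
 *		usb_poll();	// polls root hubs, attaches devices
 */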

static void
xhci_reset(hci_t *const controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci_stop(controller);

	xhci->opreg->usbcmd |= USBCMD_HCRST;

	/* Existing Intel xHCI controllers require a delay of 1ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may, very rarely, result in a system hang.
	 */
	if (CONFIG(LP_ARCH_X86))
		mdelay(1);

	xhci_debug("Resetting controller... ");
	if (!xhci_handshake(&xhci->opreg->usbcmd, USBCMD_HCRST, 0, 1000000L))
		usb_debug("timeout!\n");
	else
		usb_debug("ok.\n");
}

static void
xhci_reinit(hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	if (xhci_wait_ready(xhci))
		return;

	/* Enable all available slots */
	xhci->opreg->config = xhci->max_slots_en;

	/* Set DCBAA */
	xhci->opreg->dcbaap_lo = virt_to_phys(xhci->dcbaa);
	xhci->opreg->dcbaap_hi = 0;

	/* Initialize command ring */
	xhci_init_cycle_ring(&xhci->cr, COMMAND_RING_SIZE);
	xhci_debug("command ring @%p (0x%08"PRIxPTR")\n",
		   xhci->cr.ring, virt_to_phys(xhci->cr.ring));
	xhci->opreg->crcr_lo = virt_to_phys(xhci->cr.ring) | CRCR_RCS;
	xhci->opreg->crcr_hi = 0;
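	/*
	 * Note: CRCR_RCS = 1 above tells the xHC the initial ring cycle
	 * state, matching the producer cycle state that
	 * xhci_init_cycle_ring() just set.
	 */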

	/* Make sure interrupts are disabled */
	xhci->opreg->usbcmd &= ~USBCMD_INTE;

	/* Initialize event ring */
	xhci_reset_event_ring(&xhci->er);
	xhci_debug("event ring @%p (0x%08"PRIxPTR")\n",
		   xhci->er.ring, virt_to_phys(xhci->er.ring));
	xhci_debug("ERST Max: 0x%"PRIx32" -> 0x%x entries\n",
		   CAP_GET(ERST_MAX, xhci->capreg),
		   1 << CAP_GET(ERST_MAX, xhci->capreg));
	memset((void *)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
	xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
	xhci->ev_ring_table[0].seg_base_hi = 0;
	xhci->ev_ring_table[0].seg_size = EVENT_RING_SIZE;

	/* pass event ring table to hardware */
	wmb();
	/* Initialize primary interrupter */
	xhci->hcrreg->intrrs[0].erstsz = 1;
	xhci_update_event_dq(xhci);
	/* erstba has to be written at last */
	xhci->hcrreg->intrrs[0].erstba_lo = virt_to_phys(xhci->ev_ring_table);
	xhci->hcrreg->intrrs[0].erstba_hi = 0;

	xhci_start(controller);

#ifdef USB_DEBUG
	int i;
	for (i = 0; i < 32; ++i) {
		xhci_debug("NOOP run #%d\n", i);
		trb_t *const cmd = xhci_next_command_trb(xhci);
		TRB_SET(TT, cmd, TRB_CMD_NOOP);

		xhci_post_command(xhci);

		/* Wait for result in event ring */
		int cc = xhci_wait_for_command_done(xhci, cmd, 1);
		xhci_debug("Command ring is %srunning: cc: %d\n",
			   (xhci->opreg->crcr_lo & CRCR_CRR) ? "" : "not ", cc);
		if (cc != CC_SUCCESS)
			xhci_debug("noop command failed.\n");
	}
#endif
}

static void
xhci_shutdown(hci_t *const controller)
{
	int i;

	if (controller == 0)
		return;

	detach_controller(controller);

	xhci_t *const xhci = XHCI_INST(controller);
	xhci_stop(controller);

#if CONFIG(LP_USB_PCI)
	if (controller->pcidev)
		xhci_switchback_ppt_ports(controller->pcidev);
#endif

	if (xhci->sp_ptrs) {
		const size_t max_sp_bufs =
			CAP_GET(MAX_SCRATCH_BUFS_HI, xhci->capreg) << 5 |
			CAP_GET(MAX_SCRATCH_BUFS_LO, xhci->capreg);
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dma_buffer);
	free(xhci->dcbaa);
	free(xhci->dev);
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci);
	free(controller);
}
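
/*
 * USBCMD.RS is the Run/Stop bit; USBSTS.HCH (HCHalted) reflects the result,
 * reading 0 while the controller runs and 1 once it has halted.
 */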
static void
xhci_start(hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci->opreg->usbcmd |= USBCMD_RS;
	if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_HCH, 0, 1000000L))
		xhci_debug("Controller didn't start within 1s\n");
}

static void
xhci_stop(hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci->opreg->usbcmd &= ~USBCMD_RS;
	if (!xhci_handshake(&xhci->opreg->usbsts,
			    USBSTS_HCH, USBSTS_HCH, 1000000L))
		xhci_debug("Controller didn't halt within 1s\n");
}

static int
xhci_reset_endpoint(usbdev_t *const dev, endpoint_t *const ep)
{
	xhci_t *const xhci = XHCI_INST(dev->controller);
	const int slot_id = dev->address;
	const int ep_id = ep ? xhci_ep_id(ep) : 1;
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];

	xhci_debug("Resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));
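
	/*
	 * Endpoint Context states (xHCI spec 6.2.3): 0 Disabled, 1 Running,
	 * 2 Halted, 3 Stopped, 4 Error; hence the literal state checks below.
	 */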
	/* Run Reset Endpoint Command if the EP is in Halted state */
	if (EC_GET(STATE, epctx) == 2) {
		const int cc = xhci_cmd_reset_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS) {
			xhci_debug("Reset Endpoint Command failed: %d\n", cc);
			return 1;
		}
	}

	/* Clear TT buffer for bulk and control endpoints behind a TT */
	const int hub = dev->hub;
	if (hub && dev->speed < HIGH_SPEED &&
	    dev->controller->devices[hub]->speed == HIGH_SPEED)
		/* TODO */;

	/* Reset transfer ring if the endpoint is in the right state */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 3 || ep_state == 4) {
		transfer_ring_t *const tr =
			xhci->dev[slot_id].transfer_rings[ep_id];
		const int cc = xhci_cmd_set_tr_dq(xhci, slot_id, ep_id,
						  tr->ring, 1);
		if (cc != CC_SUCCESS) {
			xhci_debug("Set TR Dequeue Command failed: %d\n", cc);
			return 1;
		}
		xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
	}

	xhci_debug("Finished resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	return 0;
}
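
/*
 * Advance the ring's enqueue pointer by one TRB, handing the current TRB to
 * the controller by writing its cycle bit. A Link TRB inherits the chain
 * bit of the TRB before it, so a TD may span the ring's wrap-around point.
 */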
static void
xhci_enqueue_trb(transfer_ring_t *const tr)
{
	const int chain = TRB_GET(CH, tr->cur);
	TRB_SET(C, tr->cur, tr->pcs);
	++tr->cur;

	while (TRB_GET(TT, tr->cur) == TRB_LINK) {
		xhci_spew("Handling LINK pointer\n");
		const int tc = TRB_GET(TC, tr->cur);
		TRB_SET(CH, tr->cur, chain);
		wmb();
		TRB_SET(C, tr->cur, tr->pcs);
		tr->cur = phys_to_virt(tr->cur->ptr_low);
		if (tc)
			tr->pcs ^= 1;
	}
}

static void
xhci_ring_doorbell(endpoint_t *const ep)
{
	/* Ensure all TRB changes are written to memory. */
	wmb();
	XHCI_INST(ep->dev->controller)->dbreg[ep->dev->address] =
		xhci_ep_id(ep);
}
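
/*
 * Enqueue one transfer descriptor (TD) as a chain of TRBs. The buffer
 * behind a single TRB must not cross a 64KiB boundary (xHCI spec 6.4.1),
 * so the data is split at 64KiB addresses, with the TD Size field (TDS)
 * carrying the number of packets still outstanding. A trailing Event Data
 * TRB with IOC set yields a single transfer event reporting the TD's
 * accumulated length.
 */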
static void
xhci_enqueue_td(transfer_ring_t *const tr, const int ep, const size_t mps,
		const int dalen, void *const data, const int dir)
{
	trb_t *trb = NULL;				/* cur TRB */
	u8 *cur_start = data;				/* cur data pointer */
	size_t length = dalen;				/* remaining bytes */
	size_t packets = (length + mps - 1) / mps;	/* remaining packets */
	size_t residue = 0;				/* residue from last TRB */
	size_t trb_count = 0;				/* TRBs added so far */

	while (length || !trb_count /* enqueue at least one */) {
		const size_t cur_end = ((size_t)cur_start + 0x10000) & ~0xffff;
		size_t cur_length = cur_end - (size_t)cur_start;
		if (length < cur_length) {
			cur_length = length;
			packets = 0;
			length = 0;
		} else if (!CONFIG(LP_USB_XHCI_MTK_QUIRK)) {
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		trb = tr->cur;
		xhci_clear_trb(trb, tr->pcs);
		trb->ptr_low = virt_to_phys(cur_start);
		TRB_SET(TL, trb, cur_length);
		TRB_SET(TDS, trb, MIN(TRB_MAX_TD_SIZE, packets));
		TRB_SET(CH, trb, 1);

		if (length && CONFIG(LP_USB_XHCI_MTK_QUIRK)) {
			/*
			 * For MTK's xHCI controller, TDS defines the number
			 * of packets that remain to be transferred for a TD
			 * after processing all Max packets in all previous
			 * TRBs, i.e. it does not count the current TRB's.
			 */
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		/* The first TRB of a control transfer is the Data Stage TRB */
		if (!trb_count && ep == 1) {
			TRB_SET(DIR, trb, dir);
			TRB_SET(TT, trb, TRB_DATA_STAGE);
		} else {
			TRB_SET(TT, trb, TRB_NORMAL);
		}
		/*
		 * This is a workaround for Synopsys DWC3: if the ENT flag is
		 * not set for the Normal and Data Stage TRBs, we get an Event
		 * TRB with length 0x20d from the controller when we enqueue a
		 * TRB for the IN endpoint with length 0x200.
		 */
		if (!length)
			TRB_SET(ENT, trb, 1);

		xhci_enqueue_trb(tr);

		cur_start += cur_length;
		++trb_count;
	}

	trb = tr->cur;
	xhci_clear_trb(trb, tr->pcs);
	trb->ptr_low = virt_to_phys(trb);	/* for easier debugging only */
	TRB_SET(TT, trb, TRB_EVENT_DATA);
	TRB_SET(IOC, trb, 1);

	xhci_enqueue_trb(tr);
}
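
/*
 * A control transfer consists of a Setup Stage TD, an optional Data Stage
 * TD and a Status Stage TD whose direction is the opposite of the data
 * stage's (IN when there is no data). With IDT set, the 8-byte setup packet
 * lives directly in the Setup TRB's pointer field. Buffers that are not
 * DMA-coherent are bounced through xhci->dma_buffer.
 */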
static int
xhci_control(usbdev_t *const dev, const direction_t dir,
	     const int drlen, void *const devreq,
	     const int dalen, unsigned char *const src)
{
	unsigned char *data = src;
	xhci_t *const xhci = XHCI_INST(dev->controller);
	epctx_t *const epctx = xhci->dev[dev->address].ctx.ep0;
	transfer_ring_t *const tr = xhci->dev[dev->address].transfer_rings[1];

	const size_t off = (size_t)data & 0xffff;
	if ((off + dalen) > ((TRANSFER_RING_SIZE - 4) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Reset endpoint if it's not running */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state > 1) {
		if (xhci_reset_endpoint(dev, NULL))
			return -1;
	}

	if (dalen && !dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (dalen > DMA_SIZE) {
			xhci_debug("Control transfer too large: %d\n", dalen);
			return -1;
		}
		if (dir == OUT)
			memcpy(data, src, dalen);
	}

	/* Fill and enqueue setup TRB */
	trb_t *const setup = tr->cur;
	xhci_clear_trb(setup, tr->pcs);
	setup->ptr_low = ((u32 *)devreq)[0];
	setup->ptr_high = ((u32 *)devreq)[1];
	TRB_SET(TL, setup, 8);
	TRB_SET(TRT, setup, (dalen)
			? ((dir == OUT) ? TRB_TRT_OUT_DATA : TRB_TRT_IN_DATA)
			: TRB_TRT_NO_DATA);
	TRB_SET(TT, setup, TRB_SETUP_STAGE);
	TRB_SET(IDT, setup, 1);
	TRB_SET(IOC, setup, 1);
	xhci_enqueue_trb(tr);

	/* Fill and enqueue data TRBs (if any) */
	if (dalen) {
		const unsigned mps = EC_GET(MPS, epctx);
		const unsigned dt_dir = (dir == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
		xhci_enqueue_td(tr, 1, mps, dalen, data, dt_dir);
	}

	/* Fill status TRB */
	trb_t *const status = tr->cur;
	xhci_clear_trb(status, tr->pcs);
	TRB_SET(DIR, status, (dir == OUT) ? TRB_DIR_IN : TRB_DIR_OUT);
	TRB_SET(TT, status, TRB_STATUS_STAGE);
	TRB_SET(IOC, status, 1);
	xhci_enqueue_trb(tr);

	/* Ring doorbell for EP0 */
	xhci_ring_doorbell(&dev->endpoints[0]);

	/* Wait for transfer events */
	int i, transferred = 0;
	const int n_stages = 2 + !!dalen;
	for (i = 0; i < n_stages; ++i) {
		const int ret = xhci_wait_for_transfer(xhci, dev->address, 1);
		transferred += ret;
		if (ret < 0) {
			if (ret == TIMEOUT) {
				xhci_debug("Stopping ID %d EP 1\n",
					   dev->address);
				xhci_cmd_stop_endpoint(xhci, dev->address, 1);
			}
			xhci_debug("Stage %d/%d failed: %d\n"
				   "  trb ring:   @%p\n"
				   "  setup trb:  @%p\n"
				   "  status trb: @%p\n"
				   "  ep state:   %d -> %d\n"
				   "  usbsts:     0x%08"PRIx32"\n",
				   i, n_stages, ret,
				   tr->ring, setup, status,
				   ep_state, EC_GET(STATE, epctx),
				   xhci->opreg->usbsts);
			return ret;
		}
	}

	if (dir == IN && data != src)
		memcpy(src, data, transferred);
	return transferred;
}

/* finalize == 1: if data is of packet aligned size, add a zero length packet */
static int
xhci_bulk(endpoint_t *const ep, const int size, u8 *const src,
	  const int finalize)
{
	/* finalize: Hopefully the xHCI controller always does this.
	   We have no control over the packets. */

	u8 *data = src;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	const size_t off = (size_t)data & 0xffff;
	if ((off + size) > ((TRANSFER_RING_SIZE - 2) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	if (!dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (size > DMA_SIZE) {
			xhci_debug("Bulk transfer too large: %d\n", size);
			return -1;
		}
		if (ep->direction == OUT)
			memcpy(data, src, size);
	}

	/* Reset endpoint if it's not running */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state > 1) {
		if (xhci_reset_endpoint(ep->dev, ep))
			return -1;
	}

	/* Enqueue transfer and ring doorbell */
	const unsigned mps = EC_GET(MPS, epctx);
	const unsigned dir = (ep->direction == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
	xhci_enqueue_td(tr, ep_id, mps, size, data, dir);
	xhci_ring_doorbell(ep);

	/* Wait for transfer event */
	const int ret = xhci_wait_for_transfer(xhci, ep->dev->address, ep_id);
	if (ret < 0) {
		if (ret == TIMEOUT) {
			xhci_debug("Stopping ID %d EP %d\n",
				   ep->dev->address, ep_id);
			xhci_cmd_stop_endpoint(xhci, ep->dev->address, ep_id);
		}
		xhci_debug("Bulk transfer failed: %d\n"
			   "  ep state: %d -> %d\n"
			   "  usbsts:   0x%08"PRIx32"\n",
			   ret, ep_state,
			   EC_GET(STATE, epctx),
			   xhci->opreg->usbsts);
		return ret;
	}

	if (ep->direction == IN && data != src)
		memcpy(src, data, ret);
	return ret;
}

static trb_t *
xhci_next_trb(trb_t *cur, int *const pcs)
{
	++cur;
	while (TRB_GET(TT, cur) == TRB_LINK) {
		if (pcs && TRB_GET(TC, cur))
			*pcs ^= 1;
		cur = phys_to_virt(cur->ptr_low);
	}
	return cur;
}
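
/*
 * Interrupt queues pre-fill the endpoint's transfer ring with one Normal TRB
 * per request buffer (ISP and IOC set, so short packets also raise events).
 * All but the last TRB are handed to the controller immediately; the spare
 * TRB is enqueued by xhci_poll_intr_queue() whenever a completed buffer is
 * handed back to the caller, which keeps the ring permanently armed.
 */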
/* create and hook-up an intr queue into device schedule */
static void *
xhci_create_intr_queue(endpoint_t *const ep,
		       const int reqsize, const int reqcount,
		       const int reqtiming)
{
	/* reqtiming: We ignore it and use the interval from the
	   endpoint descriptor configured earlier. */

	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	if (reqcount > (TRANSFER_RING_SIZE - 2)) {
		xhci_debug("reqcount is too high, at most %d supported\n",
			   TRANSFER_RING_SIZE - 2);
		return NULL;
	}
	if (reqsize > 0x10000) {
		xhci_debug("reqsize is too large, at most 64KiB supported\n");
		return NULL;
	}
	if (xhci->dev[slot_id].interrupt_queues[ep_id]) {
		xhci_debug("Only one interrupt queue per endpoint supported\n");
		return NULL;
	}

	/* Allocate intrq structure and reqdata chunks */

	intrq_t *const intrq = malloc(sizeof(*intrq));
	if (!intrq) {
		xhci_debug("Out of memory\n");
		return NULL;
	}

	int i;
	int pcs = tr->pcs;
	trb_t *cur = tr->cur;
	for (i = 0; i < reqcount; ++i) {
		if (TRB_GET(C, cur) == pcs) {
			xhci_debug("Not enough empty TRBs\n");
			goto _free_return;
		}
		void *const reqdata = xhci_align(1, reqsize);
		if (!reqdata) {
			xhci_debug("Out of memory\n");
			goto _free_return;
		}
		xhci_clear_trb(cur, pcs);
		cur->ptr_low = virt_to_phys(reqdata);
		cur->ptr_high = 0;
		TRB_SET(TL, cur, reqsize);
		TRB_SET(TT, cur, TRB_NORMAL);
		TRB_SET(ISP, cur, 1);
		TRB_SET(IOC, cur, 1);

		cur = xhci_next_trb(cur, &pcs);
	}

	intrq->size = reqsize;
	intrq->count = reqcount;
	intrq->next = tr->cur;
	intrq->ready = NULL;
	intrq->ep = ep;
	xhci->dev[slot_id].interrupt_queues[ep_id] = intrq;

	/* Now enqueue all the prepared TRBs but the last
	   and ring the doorbell. */
	for (i = 0; i < (reqcount - 1); ++i)
		xhci_enqueue_trb(tr);
	xhci_ring_doorbell(ep);

	return intrq;

_free_return:
	cur = tr->cur;
	for (--i; i >= 0; --i) {
		free(phys_to_virt(cur->ptr_low));
		cur = xhci_next_trb(cur, NULL);
	}
	free(intrq);
	return NULL;
}

/* remove queue from device schedule, dropping all data that came in */
static void
xhci_destroy_intr_queue(endpoint_t *const ep, void *const q)
{
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	intrq_t *const intrq = (intrq_t *)q;

	/* Make sure the endpoint is stopped */
	if (EC_GET(STATE, xhci->dev[slot_id].ctx.ep[ep_id]) == 1) {
		const int cc = xhci_cmd_stop_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS)
			xhci_debug("Warning: Failed to stop endpoint\n");
	}

	/* Process all remaining transfer events */
	xhci_handle_events(xhci);

	/* Free all pending transfers and the interrupt queue structure */
	int i;
	for (i = 0; i < intrq->count; ++i) {
		free(phys_to_virt(intrq->next->ptr_low));
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}
	xhci->dev[slot_id].interrupt_queues[ep_id] = NULL;
	free((void *)intrq);

	/* Reset the controller's dequeue pointer and reinitialize the ring */
	xhci_cmd_set_tr_dq(xhci, slot_id, ep_id, tr->ring, 1);
	xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
}

/* Read one intr-packet from the queue, if available, and extend the queue
   for new input. Returns NULL if nothing new is available.
   Recommended use: while ((data = poll_intr_queue(q))) process(data); */
static u8 *
xhci_poll_intr_queue(void *const q)
{
	if (!q)
		return NULL;

	intrq_t *const intrq = (intrq_t *)q;
	endpoint_t *const ep = intrq->ep;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);

	/* TODO: Reset interrupt queue if it gets halted? */

	xhci_handle_events(xhci);

	u8 *reqdata = NULL;
	while (!reqdata && intrq->ready) {
		const int ep_id = xhci_ep_id(ep);
		transfer_ring_t *const tr =
			xhci->dev[ep->dev->address].transfer_rings[ep_id];

		/* Fetch the request's buffer */
		reqdata = phys_to_virt(intrq->next->ptr_low);

		/* Enqueue the last (spare) TRB and ring doorbell */
		xhci_enqueue_trb(tr);
		xhci_ring_doorbell(ep);

		/* Reuse the current buffer for the next spare TRB */
		xhci_clear_trb(tr->cur, tr->pcs);
		tr->cur->ptr_low = virt_to_phys(reqdata);
		tr->cur->ptr_high = 0;
		TRB_SET(TL, tr->cur, intrq->size);
		TRB_SET(TT, tr->cur, TRB_NORMAL);
		TRB_SET(ISP, tr->cur, 1);
		TRB_SET(IOC, tr->cur, 1);

		/* Check if anything was transferred */
		const size_t read = TRB_GET(TL, intrq->next);
		if (!read)
			reqdata = NULL;
		else if (read < intrq->size)
			/* At least zero it, poll interface is rather limited */
			memset(reqdata + read, 0x00, intrq->size - read);

		/* Advance the interrupt queue */
		if (intrq->ready == intrq->next)
			/* This was the last TRB being ready */
			intrq->ready = NULL;
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}

	return reqdata;
}