/*
 * Copyright (C) 2010 Patrick Georgi
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

//#define XHCI_SPEW_DEBUG
#include <arch/virtual.h>
#include "xhci_private.h"
static void xhci_start(hci_t *controller);
static void xhci_stop(hci_t *controller);
static void xhci_reset(hci_t *controller);
static void xhci_reinit(hci_t *controller);
static void xhci_shutdown(hci_t *controller);
static int xhci_bulk(endpoint_t *ep, int size, u8 *data, int finalize);
static int xhci_control(usbdev_t *dev, direction_t dir, int drlen, void *devreq,
			int dalen, u8 *data);
static void* xhci_create_intr_queue(endpoint_t *ep, int reqsize, int reqcount,
				    int reqtiming);
static void xhci_destroy_intr_queue(endpoint_t *ep, void *queue);
static u8 *xhci_poll_intr_queue(void *queue);
/*
 * Some structures must not cross page boundaries. To get this,
 * we align them by their size (or the next greater power of 2).
 */
static void *
xhci_align(const size_t min_align, const size_t size)
{
	size_t align;
	if (!(size & (size - 1)))
		align = size; /* It's a power of 2 */
	else
		align = 1 << ((sizeof(unsigned) << 3) - __builtin_clz(size));
	if (align < min_align)
		align = min_align;
	xhci_spew("Aligning %zu to %zu\n", size, align);
	return dma_memalign(align, size);
}
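/*
 * xhci_align() example: for size 24, the __builtin_clz expression yields
 * the next power of 2, 32; a 32-byte-aligned 24-byte object can never
 * straddle a 32-byte (and therefore never a page) boundary. Power-of-2
 * sizes are simply aligned to themselves.
 */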
static void
xhci_clear_trb(trb_t *const trb, const int pcs)
{
	/* Zero the TRB and invert its cycle bit relative to the producer
	   cycle state, so it reads as empty until explicitly enqueued. */
	trb->ptr_low	= 0;
	trb->ptr_high	= 0;
	trb->status	= 0;
	trb->control	= !pcs;
}
static void
xhci_init_cycle_ring(transfer_ring_t *const tr, const size_t ring_size)
{
	memset((void *)tr->ring, 0, ring_size * sizeof(*tr->ring));
	TRB_SET(TT, &tr->ring[ring_size - 1], TRB_LINK);
	TRB_SET(TC, &tr->ring[ring_size - 1], 1);
	/* only one segment that points to itself */
	tr->ring[ring_size - 1].ptr_low = virt_to_phys(tr->ring);

	tr->pcs = 1;
	tr->cur = tr->ring;
}
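/*
 * The producer cycle state (PCS) starts at 1: with the ring zeroed,
 * every TRB's cycle bit (0) differs from the PCS, which marks it as
 * empty. Each pass through the toggle-cycle link TRB flips the PCS.
 */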
/* On Panther Point: switch ports shared with EHCI to xHCI */
#if CONFIG(LP_USB_PCI)
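/*
 * The config-space offsets used below appear in Intel's 7-series PCH
 * datasheet as 0xd0 XUSB2PR (USB 2.0 port routing), 0xd4 XUSB2PRM
 * (routing mask), 0xd8 USB3_PSSEN (SuperSpeed enable) and 0xdc USB3PRM
 * (SuperSpeed mask); 8086:1e31 is the Panther Point xHCI device.
 */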
static void
xhci_switch_ppt_ports(pcidev_t addr)
{
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xdc) & 0xf;
		xhci_debug("Ports capable of SuperSpeed: 0x%"PRIx32"\n", reg32);

		/* For now, do not enable SuperSpeed on any ports */
		//pci_write_config32(addr, 0xd8, reg32);
		pci_write_config32(addr, 0xd8, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd8) & 0xf;
		xhci_debug("Configured for SuperSpeed: 0x%"PRIx32"\n", reg32);

		reg32 = pci_read_config32(addr, 0xd4) & 0xf;
		xhci_debug("Trying to switch over: 0x%"PRIx32"\n", reg32);

		pci_write_config32(addr, 0xd0, reg32);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Actually switched over: 0x%"PRIx32"\n", reg32);
	}
}
#endif
#if CONFIG(LP_USB_PCI)
/* On Panther Point: switch all ports back to EHCI */
static void
xhci_switchback_ppt_ports(pcidev_t addr)
{
	if (pci_read_config32(addr, 0x00) == 0x1e318086) {
		u32 reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Switching ports back: 0x%"PRIx32"\n", reg32);
		pci_write_config32(addr, 0xd0, 0x00000000);
		reg32 = pci_read_config32(addr, 0xd0) & 0xf;
		xhci_debug("Still switched to xHCI: 0x%"PRIx32"\n", reg32);
	}
}
#endif
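/*
 * Poll *reg until (*reg & mask) == wait_for, in roughly 1us steps.
 * Returns the remaining timeout budget, i.e. 0 if and only if we
 * timed out.
 */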
static long
xhci_handshake(volatile u32 *const reg, u32 mask, u32 wait_for, long timeout_us)
{
	while ((*reg & mask) != wait_for && timeout_us != 0) {
		--timeout_us;
		udelay(1);
	}
	return timeout_us;
}
static int
xhci_wait_ready(xhci_t *const xhci)
{
	xhci_debug("Waiting for controller to be ready... ");
	if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_CNR, 0, 100000L)) {
		usb_debug("timeout!\n");
		return -1;
	}
	usb_debug("ok.\n");
	return 0;
}
hci_t *
xhci_init(unsigned long physical_bar)
{
	int i;

	/* First, allocate and initialize static controller structures */

	hci_t *const controller = new_controller();
	controller->type		= XHCI;
	controller->start		= xhci_start;
	controller->stop		= xhci_stop;
	controller->reset		= xhci_reset;
	controller->init		= xhci_reinit;
	controller->shutdown		= xhci_shutdown;
	controller->bulk		= xhci_bulk;
	controller->control		= xhci_control;
	controller->set_address		= xhci_set_address;
	controller->finish_device_config = xhci_finish_device_config;
	controller->destroy_device	= xhci_destroy_dev;
	controller->create_intr_queue	= xhci_create_intr_queue;
	controller->destroy_intr_queue	= xhci_destroy_intr_queue;
	controller->poll_intr_queue	= xhci_poll_intr_queue;
	controller->pcidev		= 0;
	controller->reg_base = (uintptr_t)physical_bar;
	controller->instance = xzalloc(sizeof(xhci_t));
	xhci_t *const xhci = (xhci_t *)controller->instance;

	init_device_entry(controller, 0);
	xhci->roothub = controller->devices[0];
	xhci->cr.ring = xhci_align(64, COMMAND_RING_SIZE * sizeof(trb_t));
	xhci->er.ring = xhci_align(64, EVENT_RING_SIZE * sizeof(trb_t));
	xhci->ev_ring_table = xhci_align(64, sizeof(erst_entry_t));
	if (!xhci->roothub || !xhci->cr.ring ||
	    !xhci->er.ring || !xhci->ev_ring_table) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}
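	/*
	 * Per the xHCI spec, the BAR holds four register sets: capability
	 * registers at offset 0, operational registers at CAPLENGTH,
	 * runtime registers at RTSOFF and the doorbell array at DBOFF.
	 */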
	xhci->capreg = phys_to_virt(physical_bar);
	xhci->opreg = phys_to_virt(physical_bar) + CAP_GET(CAPLEN, xhci->capreg);
	xhci->hcrreg = phys_to_virt(physical_bar) + xhci->capreg->rtsoff;
	xhci->dbreg = phys_to_virt(physical_bar) + xhci->capreg->dboff;

	xhci_debug("regbase: 0x%"PRIxPTR"\n", physical_bar);
	xhci_debug("caplen:  0x%"PRIx32"\n", CAP_GET(CAPLEN, xhci->capreg));
	xhci_debug("rtsoff:  0x%"PRIx32"\n", xhci->capreg->rtsoff);
	xhci_debug("dboff:   0x%"PRIx32"\n", xhci->capreg->dboff);
	xhci_debug("hciversion: %"PRIx8".%"PRIx8"\n",
		   CAP_GET(CAPVER_HI, xhci->capreg), CAP_GET(CAPVER_LO, xhci->capreg));
	if ((CAP_GET(CAPVER, xhci->capreg) < 0x96) ||
	    (CAP_GET(CAPVER, xhci->capreg) > 0x120)) {
		xhci_debug("Unsupported xHCI version\n");
		goto _free_xhci;
	}

	xhci_debug("context size: %dB\n", CTXSIZE(xhci));
	xhci_debug("maxslots: 0x%02"PRIx32"\n", CAP_GET(MAXSLOTS, xhci->capreg));
	xhci_debug("maxports: 0x%02"PRIx32"\n", CAP_GET(MAXPORTS, xhci->capreg));
	const unsigned pagesize = xhci->opreg->pagesize << 12;
	xhci_debug("pagesize: 0x%04x\n", pagesize);
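	/*
	 * Note: PAGESIZE is really a bit field (bit n set means a page
	 * size of 1 << (n + 12) is supported); the shift above yields
	 * 4KiB for the common register value of 1.
	 */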
	/*
	 * We haven't touched the hardware yet. So we allocate all dynamic
	 * structures at first and can still chicken out easily if we run
	 * out of memory.
	 */
	xhci->max_slots_en = CAP_GET(MAXSLOTS, xhci->capreg) &
			     CONFIG_LP_MASK_MaxSlotsEn;
	xhci->dcbaa = xhci_align(64, (xhci->max_slots_en + 1) * sizeof(u64));
	xhci->dev = malloc((xhci->max_slots_en + 1) * sizeof(*xhci->dev));
	if (!xhci->dcbaa || !xhci->dev) {
		xhci_debug("Out of memory\n");
		goto _free_xhci;
	}
	memset(xhci->dcbaa, 0x00, (xhci->max_slots_en + 1) * sizeof(u64));
	memset(xhci->dev, 0x00, (xhci->max_slots_en + 1) * sizeof(*xhci->dev));
	/*
	 * Let dcbaa[0] point to another array of pointers, sp_ptrs.
	 * The pointers therein point to scratchpad buffers (pages).
	 */
	const size_t max_sp_bufs =
		CAP_GET(MAX_SCRATCH_BUFS_HI, xhci->capreg) << 5 |
		CAP_GET(MAX_SCRATCH_BUFS_LO, xhci->capreg);
	xhci_debug("max scratchpad bufs: 0x%zx\n", max_sp_bufs);
	if (max_sp_bufs) {
		const size_t sp_ptrs_size = max_sp_bufs * sizeof(u64);
		xhci->sp_ptrs = xhci_align(64, sp_ptrs_size);
		if (!xhci->sp_ptrs) {
			xhci_debug("Out of memory\n");
			goto _free_xhci_structs;
		}
		memset(xhci->sp_ptrs, 0x00, sp_ptrs_size);
		for (i = 0; i < max_sp_bufs; ++i) {
			/* Could use mmap() here if we had it.
			   Maybe there is another way. */
			void *const page = memalign(pagesize, pagesize);
			if (!page) {
				xhci_debug("Out of memory\n");
				goto _free_xhci_structs;
			}
			xhci->sp_ptrs[i] = virt_to_phys(page);
		}
		xhci->dcbaa[0] = virt_to_phys(xhci->sp_ptrs);
	}
	if (dma_initialized()) {
		xhci->dma_buffer = dma_memalign(64 * 1024, DMA_SIZE);
		if (!xhci->dma_buffer) {
			xhci_debug("Not enough memory for DMA bounce buffer\n");
			goto _free_xhci_structs;
		}
	}
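	/*
	 * Transfer buffers that are not DMA-coherent are bounced through
	 * xhci->dma_buffer; see the dma_coherent() checks in
	 * xhci_control() and xhci_bulk() below.
	 */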
	/* Now start working on the hardware */
	if (xhci_wait_ready(xhci))
		goto _free_xhci_structs;
	/* TODO: Check if BIOS claims ownership (and hand over) */

	xhci_reset(controller);
	xhci_reinit(controller);

	xhci->roothub->controller = controller;
	xhci->roothub->init = xhci_rh_init;
	xhci->roothub->init(xhci->roothub);

	return controller;
_free_xhci_structs:
	free(xhci->dma_buffer);
	if (xhci->sp_ptrs) {
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dcbaa);
_free_xhci:
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci);
/* _free_controller: */
	detach_controller(controller);
	free(controller);
	return NULL;
}
#if CONFIG(LP_USB_PCI)
hci_t *
xhci_pci_init(pcidev_t addr)
{
	u32 reg_addr;
	hci_t *controller;

	reg_addr = pci_read_config32(addr, PCI_BASE_ADDRESS_0) &
		   PCI_BASE_ADDRESS_MEM_MASK;
	if (pci_read_config32(addr, PCI_BASE_ADDRESS_1) > 0)
		fatal("We don't do 64bit addressing.\n");

	controller = xhci_init((unsigned long)reg_addr);
	if (controller) {
		xhci_t *xhci = controller->instance;
		controller->pcidev = addr;

		xhci_switch_ppt_ports(addr);

		/* Set up any quirks for controller root hub */
		xhci->roothub->quirks = pci_quirk_check(addr);
	}

	return controller;
}
#endif
static void
xhci_reset(hci_t *const controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci_stop(controller);

	xhci->opreg->usbcmd |= USBCMD_HCRST;

	/* Existing Intel xHCI controllers require a delay of 1ms after
	 * setting the CMD_RESET bit and before accessing any HC registers.
	 * This allows the HC to complete the reset operation and be ready
	 * for HC register access. Without this delay, the subsequent HC
	 * register access may, very rarely, result in a system hang.
	 */
	if (CONFIG(LP_ARCH_X86))
		mdelay(1);

	xhci_debug("Resetting controller... ");
	if (!xhci_handshake(&xhci->opreg->usbcmd, USBCMD_HCRST, 0, 1000000L))
		usb_debug("timeout!\n");
	else
		usb_debug("ok.\n");
}
static void
xhci_reinit(hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	if (xhci_wait_ready(xhci))
		return;

	/* Enable all available slots */
	xhci->opreg->config = xhci->max_slots_en;

	/* Set DCBAA */
	xhci->opreg->dcbaap_lo = virt_to_phys(xhci->dcbaa);
	xhci->opreg->dcbaap_hi = 0;
	/* Initialize command ring */
	xhci_init_cycle_ring(&xhci->cr, COMMAND_RING_SIZE);
	xhci_debug("command ring @%p (0x%08"PRIxPTR")\n",
		   xhci->cr.ring, virt_to_phys(xhci->cr.ring));
	xhci->opreg->crcr_lo = virt_to_phys(xhci->cr.ring) | CRCR_RCS;
	xhci->opreg->crcr_hi = 0;

	/* Make sure interrupts are disabled */
	xhci->opreg->usbcmd &= ~USBCMD_INTE;

	/* Initialize event ring */
	xhci_reset_event_ring(&xhci->er);
	xhci_debug("event ring @%p (0x%08"PRIxPTR")\n",
		   xhci->er.ring, virt_to_phys(xhci->er.ring));
	xhci_debug("ERST Max: 0x%"PRIx32" -> 0x%x entries\n",
		   CAP_GET(ERST_MAX, xhci->capreg),
		   1 << CAP_GET(ERST_MAX, xhci->capreg));
	memset((void*)xhci->ev_ring_table, 0x00, sizeof(erst_entry_t));
	xhci->ev_ring_table[0].seg_base_lo = virt_to_phys(xhci->er.ring);
	xhci->ev_ring_table[0].seg_base_hi = 0;
	xhci->ev_ring_table[0].seg_size = EVENT_RING_SIZE;
	/* pass event ring table to hardware */
	wmb();
	/* Initialize primary interrupter */
	xhci->hcrreg->intrrs[0].erstsz = 1;
	xhci_update_event_dq(xhci);
	/* erstba has to be written last */
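	/*
	 * (The controller may latch the event ring state when ERSTBA is
	 * written, so ERSTSZ and the dequeue pointer must be valid first.)
	 */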
	xhci->hcrreg->intrrs[0].erstba_lo = virt_to_phys(xhci->ev_ring_table);
	xhci->hcrreg->intrrs[0].erstba_hi = 0;
	xhci_start(controller);

	/* Exercise the command ring with some no-op commands */
	int i;
	for (i = 0; i < 32; ++i) {
		xhci_debug("NOOP run #%d\n", i);
		trb_t *const cmd = xhci_next_command_trb(xhci);
		TRB_SET(TT, cmd, TRB_CMD_NOOP);

		xhci_post_command(xhci);

		/* Wait for result in event ring */
		int cc = xhci_wait_for_command_done(xhci, cmd, 1);

		xhci_debug("Command ring is %srunning: cc: %d\n",
			   (xhci->opreg->crcr_lo & CRCR_CRR) ? "" : "not ", cc);
		if (cc != CC_SUCCESS)
			xhci_debug("noop command failed.\n");
	}
}
static void
xhci_shutdown(hci_t *const controller)
{
	int i;

	if (controller == 0)
		return;

	detach_controller(controller);

	xhci_t *const xhci = XHCI_INST(controller);
	xhci_stop(controller);

#if CONFIG(LP_USB_PCI)
	if (controller->pcidev)
		xhci_switchback_ppt_ports(controller->pcidev);
#endif

	if (xhci->sp_ptrs) {
		const size_t max_sp_bufs =
			CAP_GET(MAX_SCRATCH_BUFS_HI, xhci->capreg) << 5 |
			CAP_GET(MAX_SCRATCH_BUFS_LO, xhci->capreg);
		for (i = 0; i < max_sp_bufs; ++i) {
			if (xhci->sp_ptrs[i])
				free(phys_to_virt(xhci->sp_ptrs[i]));
		}
	}
	free(xhci->sp_ptrs);
	free(xhci->dma_buffer);
	free(xhci->dcbaa);
	free(xhci->dev);
	free((void *)xhci->ev_ring_table);
	free((void *)xhci->er.ring);
	free((void *)xhci->cr.ring);
	free(xhci);
	free(controller);
}
static void
xhci_start(hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci->opreg->usbcmd |= USBCMD_RS;
	if (!xhci_handshake(&xhci->opreg->usbsts, USBSTS_HCH, 0, 1000000L))
		xhci_debug("Controller didn't start within 1s\n");
}
static void
xhci_stop(hci_t *controller)
{
	xhci_t *const xhci = XHCI_INST(controller);

	xhci->opreg->usbcmd &= ~USBCMD_RS;
	if (!xhci_handshake(&xhci->opreg->usbsts,
			    USBSTS_HCH, USBSTS_HCH, 1000000L))
		xhci_debug("Controller didn't halt within 1s\n");
}
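/*
 * Endpoint context states per the xHCI spec: 0 Disabled, 1 Running,
 * 2 Halted, 3 Stopped, 4 Error. The magic numbers compared against
 * EC_GET(STATE, ...) below refer to these.
 */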
static int
xhci_reset_endpoint(usbdev_t *const dev, endpoint_t *const ep)
{
	xhci_t *const xhci = XHCI_INST(dev->controller);
	const int slot_id = dev->address;
	const int ep_id = ep ? xhci_ep_id(ep) : 1;
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];

	xhci_debug("Resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	/* Run Reset Endpoint Command if the EP is in Halted state */
	if (EC_GET(STATE, epctx) == 2) {
		const int cc = xhci_cmd_reset_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS) {
			xhci_debug("Reset Endpoint Command failed: %d\n", cc);
			return 1;
		}
	}

	/* Clear TT buffer for bulk and control endpoints behind a TT */
	const int hub = dev->hub;
	if (hub && dev->speed < HIGH_SPEED &&
	    dev->controller->devices[hub]->speed == HIGH_SPEED)
		/* TODO */;

	/* Reset transfer ring if the endpoint is in the right state */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state == 3 || ep_state == 4) {
		transfer_ring_t *const tr =
			xhci->dev[slot_id].transfer_rings[ep_id];
		const int cc = xhci_cmd_set_tr_dq(xhci, slot_id, ep_id,
						  tr->ring, 1);
		if (cc != CC_SUCCESS) {
			xhci_debug("Set TR Dequeue Command failed: %d\n", cc);
			return 1;
		}
		xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
	}

	xhci_debug("Finished resetting ID %d EP %d (ep state: %d)\n",
		   slot_id, ep_id, EC_GET(STATE, epctx));

	return 0;
}
static void
xhci_enqueue_trb(transfer_ring_t *const tr)
{
	const int chain = TRB_GET(CH, tr->cur);
	TRB_SET(C, tr->cur, tr->pcs);
	++tr->cur;

	while (TRB_GET(TT, tr->cur) == TRB_LINK) {
		xhci_spew("Handling LINK pointer\n");
		const int tc = TRB_GET(TC, tr->cur);
		TRB_SET(CH, tr->cur, chain);
		wmb();
		TRB_SET(C, tr->cur, tr->pcs);
		tr->cur = phys_to_virt(tr->cur->ptr_low);
		if (tc)
			tr->pcs ^= 1;
	}
}
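/*
 * Note that the chain bit is copied onto the link TRB so that a TD
 * wrapping around the ring stays one logical unit, and that the PCS
 * only toggles when the link TRB has its toggle-cycle bit set.
 */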
static void
xhci_ring_doorbell(endpoint_t *const ep)
{
	/* Ensure all TRB changes are written to memory. */
	wmb();
	XHCI_INST(ep->dev->controller)->dbreg[ep->dev->address] =
		xhci_ep_id(ep);
}
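/*
 * TRB data buffers must not cross a 64KiB boundary, so TDs are split
 * at each boundary: e.g. an 8KiB transfer starting at 0x0ffff000 is
 * enqueued as one 4KiB TRB ending at 0x10000000 and a second 4KiB TRB
 * after it. The TDS field tells the controller how many MPS-sized
 * packets of the TD remain after the current TRB.
 */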
static void
xhci_enqueue_td(transfer_ring_t *const tr, const int ep, const size_t mps,
		const int dalen, void *const data, const int dir)
{
	trb_t *trb = NULL;				/* cur TRB */
	u8 *cur_start = data;				/* cur data pointer */
	size_t length = dalen;				/* remaining bytes */
	size_t packets = (length + mps - 1) / mps;	/* remaining packets */
	size_t residue = 0;				/* residue from last TRB */
	size_t trb_count = 0;				/* TRBs added so far */

	while (length || !trb_count /* enqueue at least one */) {
		const size_t cur_end = ((size_t)cur_start + 0x10000) & ~0xffff;
		size_t cur_length = cur_end - (size_t)cur_start;
		if (length < cur_length) {
			cur_length = length;
			packets = 0;
			length = 0;
		} else if (!CONFIG(LP_USB_XHCI_MTK_QUIRK)) {
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		trb = tr->cur;
		xhci_clear_trb(trb, tr->pcs);
		trb->ptr_low = virt_to_phys(cur_start);
		TRB_SET(TL, trb, cur_length);
		TRB_SET(TDS, trb, MIN(TRB_MAX_TD_SIZE, packets));
		TRB_SET(CH, trb, 1);

		if (length && CONFIG(LP_USB_XHCI_MTK_QUIRK)) {
			/*
			 * For MTK's xHCI controller, TDS defines the number
			 * of packets that remain to be transferred for a TD
			 * after processing all Max Packets in all previous
			 * TRBs, i.e. not including the current TRB's.
			 */
			packets -= (residue + cur_length) / mps;
			residue = (residue + cur_length) % mps;
			length -= cur_length;
		}

		/* Check for the first TRB of a data stage */
		if (!trb_count && ep == 1) {
			TRB_SET(DIR, trb, dir);
			TRB_SET(TT, trb, TRB_DATA_STAGE);
		} else {
			TRB_SET(TT, trb, TRB_NORMAL);
		}
		/*
		 * This is a workaround for Synopsys DWC3: if the ENT flag is
		 * not set for Normal and Data Stage TRBs, we get an Event TRB
		 * with length 0x20d from the controller when we enqueue a TRB
		 * for the IN endpoint with length 0x200.
		 */
		if (!length)
			TRB_SET(ENT, trb, 1);

		xhci_enqueue_trb(tr);

		cur_start += cur_length;
		++trb_count;
	}

	trb = tr->cur;
	xhci_clear_trb(trb, tr->pcs);
	trb->ptr_low = virt_to_phys(trb);	/* for easier debugging only */
	TRB_SET(TT, trb, TRB_EVENT_DATA);
	TRB_SET(IOC, trb, 1);

	xhci_enqueue_trb(tr);
}
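/*
 * A control transfer occupies one TRB per started 64KiB of data plus
 * TRBs for the setup, status and event data stages, which is where the
 * (TRANSFER_RING_SIZE - 4) bound checked below appears to come from.
 */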
static int
xhci_control(usbdev_t *const dev, const direction_t dir,
	     const int drlen, void *const devreq,
	     const int dalen, unsigned char *const src)
{
	unsigned char *data = src;
	xhci_t *const xhci = XHCI_INST(dev->controller);
	epctx_t *const epctx = xhci->dev[dev->address].ctx.ep0;
	transfer_ring_t *const tr = xhci->dev[dev->address].transfer_rings[1];

	const size_t off = (size_t)data & 0xffff;
	if ((off + dalen) > ((TRANSFER_RING_SIZE - 4) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Reset endpoint if it's not running */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state > 1) {
		if (xhci_reset_endpoint(dev, NULL))
			return -1;
	}

	/* Bounce the transfer if the buffer is not DMA-coherent */
	if (dalen && !dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (dalen > DMA_SIZE) {
			xhci_debug("Control transfer too large: %d\n", dalen);
			return -1;
		}
		if (dir == OUT)
			memcpy(data, src, dalen);
	}

	/* Fill and enqueue setup TRB */
	trb_t *const setup = tr->cur;
	xhci_clear_trb(setup, tr->pcs);
	setup->ptr_low = ((u32 *)devreq)[0];
	setup->ptr_high = ((u32 *)devreq)[1];
	TRB_SET(TL, setup, 8);
	TRB_SET(TRT, setup, (dalen)
			? ((dir == OUT) ? TRB_TRT_OUT_DATA : TRB_TRT_IN_DATA)
			: TRB_TRT_NO_DATA);
	TRB_SET(TT, setup, TRB_SETUP_STAGE);
	TRB_SET(IDT, setup, 1);
	TRB_SET(IOC, setup, 1);
	xhci_enqueue_trb(tr);

	/* Fill and enqueue data TRBs (if any) */
	if (dalen) {
		const unsigned mps = EC_GET(MPS, epctx);
		const unsigned dt_dir = (dir == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
		xhci_enqueue_td(tr, 1, mps, dalen, data, dt_dir);
	}

	/* Fill status TRB */
	trb_t *const status = tr->cur;
	xhci_clear_trb(status, tr->pcs);
	TRB_SET(DIR, status, (dir == OUT) ? TRB_DIR_IN : TRB_DIR_OUT);
	TRB_SET(TT, status, TRB_STATUS_STAGE);
	TRB_SET(IOC, status, 1);
	xhci_enqueue_trb(tr);

	/* Ring doorbell for EP0 */
	xhci_ring_doorbell(&dev->endpoints[0]);

	/* Wait for transfer events */
	int i, transferred = 0;
	const int n_stages = 2 + !!dalen;
	for (i = 0; i < n_stages; ++i) {
		const int ret = xhci_wait_for_transfer(xhci, dev->address, 1);
		transferred += ret;
		if (ret < 0) {
			if (ret == TIMEOUT) {
				xhci_debug("Stopping ID %d EP 1\n",
					   dev->address);
				xhci_cmd_stop_endpoint(xhci, dev->address, 1);
			}
			xhci_debug("Stage %d/%d failed: %d\n"
				   "  trb ring:   @%p\n"
				   "  setup trb:  @%p\n"
				   "  status trb: @%p\n"
				   "  ep state:   %d -> %d\n"
				   "  usbsts:     0x%08"PRIx32"\n",
				   i, n_stages, ret,
				   tr->ring, setup, status,
				   ep_state, EC_GET(STATE, epctx),
				   xhci->opreg->usbsts);
			return ret;
		}
	}

	if (dir == IN && data != src)
		memcpy(src, data, transferred);
	return transferred;
}
/* finalize == 1: if data is of packet-aligned size, add a zero-length packet */
static int
xhci_bulk(endpoint_t *const ep, const int size, u8 *const src,
	  const int finalize)
{
	/* finalize: Hopefully the xHCI controller always does this.
		     We have no control over the packets. */

	u8 *data = src;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	epctx_t *const epctx = xhci->dev[slot_id].ctx.ep[ep_id];
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	const size_t off = (size_t)data & 0xffff;
	if ((off + size) > ((TRANSFER_RING_SIZE - 2) << 16)) {
		xhci_debug("Unsupported transfer size\n");
		return -1;
	}

	/* Bounce the transfer if the buffer is not DMA-coherent */
	if (!dma_coherent(src)) {
		data = xhci->dma_buffer;
		if (size > DMA_SIZE) {
			xhci_debug("Bulk transfer too large: %d\n", size);
			return -1;
		}
		if (ep->direction == OUT)
			memcpy(data, src, size);
	}

	/* Reset endpoint if it's not running */
	const unsigned ep_state = EC_GET(STATE, epctx);
	if (ep_state > 1) {
		if (xhci_reset_endpoint(ep->dev, ep))
			return -1;
	}

	/* Enqueue transfer and ring doorbell */
	const unsigned mps = EC_GET(MPS, epctx);
	const unsigned dir = (ep->direction == OUT) ? TRB_DIR_OUT : TRB_DIR_IN;
	xhci_enqueue_td(tr, ep_id, mps, size, data, dir);
	xhci_ring_doorbell(ep);

	/* Wait for transfer event */
	const int ret = xhci_wait_for_transfer(xhci, ep->dev->address, ep_id);
	if (ret < 0) {
		if (ret == TIMEOUT) {
			xhci_debug("Stopping ID %d EP %d\n",
				   ep->dev->address, ep_id);
			xhci_cmd_stop_endpoint(xhci, ep->dev->address, ep_id);
		}
		xhci_debug("Bulk transfer failed: %d\n"
			   "  ep state: %d -> %d\n"
			   "  usbsts:   0x%08"PRIx32"\n",
			   ret, ep_state,
			   EC_GET(STATE, epctx),
			   xhci->opreg->usbsts);
		return ret;
	}

	if (ep->direction == IN && data != src)
		memcpy(src, data, ret);
	return ret;
}
static trb_t *
xhci_next_trb(trb_t *cur, int *const pcs)
{
	++cur;
	while (TRB_GET(TT, cur) == TRB_LINK) {
		if (pcs && TRB_GET(TC, cur))
			*pcs ^= 1;
		cur = phys_to_virt(cur->ptr_low);
	}
	return cur;
}
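/*
 * Interrupt queues are built on top of the endpoint's transfer ring:
 * reqcount TRBs are prepared with reqsize buffers each, all but the
 * last are enqueued immediately, and xhci_poll_intr_queue() re-arms
 * each TRB as its completion is consumed.
 */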
/* create and hook-up an intr queue into device schedule */
static void *
xhci_create_intr_queue(endpoint_t *const ep,
		       const int reqsize, const int reqcount,
		       const int reqtiming)
{
	/* reqtiming: We ignore it and use the interval from the
		      endpoint descriptor configured earlier. */

	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	if (reqcount > (TRANSFER_RING_SIZE - 2)) {
		xhci_debug("reqcount is too high, at most %d supported\n",
			   TRANSFER_RING_SIZE - 2);
		return NULL;
	}
	if (reqsize > 0x10000) {
		xhci_debug("reqsize is too large, at most 64KiB supported\n");
		return NULL;
	}
	if (xhci->dev[slot_id].interrupt_queues[ep_id]) {
		xhci_debug("Only one interrupt queue per endpoint supported\n");
		return NULL;
	}

	/* Allocate intrq structure and reqdata chunks */

	intrq_t *const intrq = malloc(sizeof(*intrq));
	if (!intrq) {
		xhci_debug("Out of memory\n");
		return NULL;
	}

	int i;
	int pcs = tr->pcs;
	trb_t *cur = tr->cur;
	for (i = 0; i < reqcount; ++i) {
		if (TRB_GET(C, cur) == pcs) {
			xhci_debug("Not enough empty TRBs\n");
			goto _free_return;
		}
		void *const reqdata = xhci_align(1, reqsize);
		if (!reqdata) {
			xhci_debug("Out of memory\n");
			goto _free_return;
		}
		xhci_clear_trb(cur, pcs);
		cur->ptr_low = virt_to_phys(reqdata);
		TRB_SET(TL, cur, reqsize);
		TRB_SET(TT, cur, TRB_NORMAL);
		TRB_SET(ISP, cur, 1);
		TRB_SET(IOC, cur, 1);

		cur = xhci_next_trb(cur, &pcs);
	}

	intrq->size = reqsize;
	intrq->count = reqcount;
	intrq->next = tr->cur;
	intrq->ready = NULL;
	intrq->ep = ep;
	xhci->dev[slot_id].interrupt_queues[ep_id] = intrq;

	/* Now enqueue all the prepared TRBs but the last
	   and ring the doorbell. */
	for (i = 0; i < (reqcount - 1); ++i)
		xhci_enqueue_trb(tr);
	xhci_ring_doorbell(ep);

	return intrq;

_free_return:
	cur = tr->cur;
	for (--i; i >= 0; --i) {
		free(phys_to_virt(cur->ptr_low));
		cur = xhci_next_trb(cur, NULL);
	}
	free(intrq);
	return NULL;
}
/* remove queue from device schedule, dropping all data that came in */
static void
xhci_destroy_intr_queue(endpoint_t *const ep, void *const q)
{
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);
	const int slot_id = ep->dev->address;
	const int ep_id = xhci_ep_id(ep);
	transfer_ring_t *const tr = xhci->dev[slot_id].transfer_rings[ep_id];

	intrq_t *const intrq = (intrq_t *)q;

	/* Make sure the endpoint is stopped */
	if (EC_GET(STATE, xhci->dev[slot_id].ctx.ep[ep_id]) == 1) {
		const int cc = xhci_cmd_stop_endpoint(xhci, slot_id, ep_id);
		if (cc != CC_SUCCESS)
			xhci_debug("Warning: Failed to stop endpoint\n");
	}

	/* Process all remaining transfer events */
	xhci_handle_events(xhci);

	/* Free all pending transfers and the interrupt queue structure */
	int i;
	for (i = 0; i < intrq->count; ++i) {
		free(phys_to_virt(intrq->next->ptr_low));
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}
	xhci->dev[slot_id].interrupt_queues[ep_id] = NULL;
	free((void *)intrq);

	/* Reset the controller's dequeue pointer and reinitialize the ring */
	xhci_cmd_set_tr_dq(xhci, slot_id, ep_id, tr->ring, 1);
	xhci_init_cycle_ring(tr, TRANSFER_RING_SIZE);
}
/* read one intr-packet from queue, if available; extend the queue for new
   input. Return NULL if nothing new is available.
   Recommended use: while ((data = poll_intr_queue(q))) process(data);
 */
static u8 *
xhci_poll_intr_queue(void *const q)
{
	if (!q)
		return NULL;

	intrq_t *const intrq = (intrq_t *)q;
	endpoint_t *const ep = intrq->ep;
	xhci_t *const xhci = XHCI_INST(ep->dev->controller);

	/* TODO: Reset interrupt queue if it gets halted? */

	xhci_handle_events(xhci);

	u8 *reqdata = NULL;
	while (!reqdata && intrq->ready) {
		const int ep_id = xhci_ep_id(ep);
		transfer_ring_t *const tr =
			xhci->dev[ep->dev->address].transfer_rings[ep_id];

		/* Fetch the request's buffer */
		reqdata = phys_to_virt(intrq->next->ptr_low);

		/* Enqueue the last (spare) TRB and ring doorbell */
		xhci_enqueue_trb(tr);
		xhci_ring_doorbell(ep);

		/* Reuse the current buffer for the next spare TRB */
		xhci_clear_trb(tr->cur, tr->pcs);
		tr->cur->ptr_low = virt_to_phys(reqdata);
		tr->cur->ptr_high = 0;
		TRB_SET(TL, tr->cur, intrq->size);
		TRB_SET(TT, tr->cur, TRB_NORMAL);
		TRB_SET(ISP, tr->cur, 1);
		TRB_SET(IOC, tr->cur, 1);

		/* Check if anything was transferred */
		const size_t read = TRB_GET(TL, intrq->next);
		if (!read)
			reqdata = NULL;
		else if (read < intrq->size)
			/* At least zero it, poll interface is rather limited */
			memset(reqdata + read, 0x00, intrq->size - read);

		/* Advance the interrupt queue */
		if (intrq->ready == intrq->next)
			/* This was the last TRB being ready */
			intrq->ready = NULL;
		intrq->next = xhci_next_trb(intrq->next, NULL);
	}

	return reqdata;
}