/* payloads/libpayload/drivers/usb/xhci_events.c — xHCI event ring handling */
/*
 * Copyright (C) 2013 secunet Security Networks AG
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
//#define XHCI_SPEW_DEBUG

#include <inttypes.h>
#include <arch/virtual.h>
#include "xhci_private.h"
35 void
36 xhci_reset_event_ring(event_ring_t *const er)
38 int i;
39 for (i = 0; i < EVENT_RING_SIZE; ++i)
40 er->ring[i].control &= ~TRB_CYCLE;
41 er->cur = er->ring;
42 er->last = er->ring + EVENT_RING_SIZE;
43 er->ccs = 1;
44 er->adv = 1;
47 static inline int
48 xhci_event_ready(const event_ring_t *const er)
50 return (er->cur->control & TRB_CYCLE) == er->ccs;
53 void
54 xhci_update_event_dq(xhci_t *const xhci)
56 if (xhci->er.adv) {
57 xhci_spew("Updating dq ptr: @%p(0x%08"PRIx32") -> %p\n",
58 phys_to_virt(xhci->hcrreg->intrrs[0].erdp_lo),
59 xhci->hcrreg->intrrs[0].erdp_lo, xhci->er.cur);
60 xhci->hcrreg->intrrs[0].erdp_lo = virt_to_phys(xhci->er.cur);
61 xhci->hcrreg->intrrs[0].erdp_hi = 0;
62 xhci->er.adv = 0;
66 void
67 xhci_advance_event_ring(xhci_t *const xhci)
69 xhci->er.cur++;
70 xhci->er.adv = 1;
71 if (xhci->er.cur == xhci->er.last) {
72 xhci_spew("Roll over in event ring\n");
73 xhci->er.cur = xhci->er.ring;
74 xhci->er.ccs ^= 1;
75 xhci_update_event_dq(xhci);
79 static void
80 xhci_handle_transfer_event(xhci_t *const xhci)
82 const trb_t *const ev = xhci->er.cur;
84 const int cc = TRB_GET(CC, ev);
85 const int id = TRB_GET(ID, ev);
86 const int ep = TRB_GET(EP, ev);
88 intrq_t *intrq;
90 if (id && id <= xhci->max_slots_en &&
91 (intrq = xhci->dev[id].interrupt_queues[ep])) {
92 /* It's a running interrupt endpoint */
93 intrq->ready = phys_to_virt(ev->ptr_low);
94 if (cc == CC_SUCCESS || cc == CC_SHORT_PACKET) {
95 TRB_SET(TL, intrq->ready,
96 intrq->size - TRB_GET(EVTL, ev));
97 } else {
98 xhci_debug("Interrupt Transfer failed: %d\n",
99 cc);
100 TRB_SET(TL, intrq->ready, 0);
102 } else if (cc == CC_STOPPED || cc == CC_STOPPED_LENGTH_INVALID) {
103 /* Ignore 'Forced Stop Events' */
104 } else {
105 xhci_debug("Warning: "
106 "Spurious transfer event for ID %d, EP %d:\n"
107 " Pointer: 0x%08x%08x\n"
108 " TL: 0x%06x\n"
109 " CC: %d\n",
110 id, ep,
111 ev->ptr_high, ev->ptr_low,
112 TRB_GET(EVTL, ev), cc);
114 xhci_advance_event_ring(xhci);
117 static void
118 xhci_handle_command_completion_event(xhci_t *const xhci)
120 const trb_t *const ev = xhci->er.cur;
122 xhci_debug("Warning: Spurious command completion event:\n"
123 " Pointer: 0x%08x%08x\n"
124 " CC: %d\n"
125 " Slot ID: %d\n"
126 " Cycle: %d\n",
127 ev->ptr_high, ev->ptr_low,
128 TRB_GET(CC, ev), TRB_GET(ID, ev), ev->control & TRB_CYCLE);
129 xhci_advance_event_ring(xhci);
132 static void
133 xhci_handle_host_controller_event(xhci_t *const xhci)
135 const trb_t *const ev = xhci->er.cur;
137 const int cc = TRB_GET(CC, ev);
138 switch (cc) {
139 case CC_EVENT_RING_FULL_ERROR:
140 xhci_debug("Event ring full! (@%p)\n", xhci->er.cur);
142 * If we get here, we have processed the whole queue:
143 * xHC pushes this event, when it sees the ring full,
144 * full of other events.
145 * IMO it's save and necessary to update the dequeue
146 * pointer here.
148 xhci_advance_event_ring(xhci);
149 xhci_update_event_dq(xhci);
150 break;
151 default:
152 xhci_debug("Warning: Spurious host controller event: %d\n", cc);
153 xhci_advance_event_ring(xhci);
154 break;
158 /* handle standard types:
159 * - command completion event
160 * - port status change event
161 * - transfer event
162 * - host controller event
164 static void
165 xhci_handle_event(xhci_t *const xhci)
167 const trb_t *const ev = xhci->er.cur;
169 const int trb_type = TRB_GET(TT, ev);
170 switch (trb_type) {
171 /* Either pass along the event or advance event ring */
172 case TRB_EV_TRANSFER:
173 xhci_handle_transfer_event(xhci);
174 break;
175 case TRB_EV_CMD_CMPL:
176 xhci_handle_command_completion_event(xhci);
177 break;
178 case TRB_EV_PORTSC:
179 xhci_debug("Port Status Change Event for %d: %d\n",
180 TRB_GET(PORT, ev), TRB_GET(CC, ev));
181 /* We ignore the event as we look for the PORTSC
182 registers instead, at a time when it suits _us_. */
183 xhci_advance_event_ring(xhci);
184 break;
185 case TRB_EV_HOST:
186 xhci_handle_host_controller_event(xhci);
187 break;
188 default:
189 xhci_debug("Warning: Spurious event: %d, Completion Code: %d\n",
190 trb_type, TRB_GET(CC, ev));
191 xhci_advance_event_ring(xhci);
192 break;
196 void
197 xhci_handle_events(xhci_t *const xhci)
199 while (xhci_event_ready(&xhci->er))
200 xhci_handle_event(xhci);
201 xhci_update_event_dq(xhci);
204 static unsigned long
205 xhci_wait_for_event(const event_ring_t *const er,
206 unsigned long *const timeout_us)
208 while (!xhci_event_ready(er) && *timeout_us) {
209 --*timeout_us;
210 udelay(1);
212 return *timeout_us;
215 static unsigned long
216 xhci_wait_for_event_type(xhci_t *const xhci,
217 const int trb_type,
218 unsigned long *const timeout_us)
220 while (xhci_wait_for_event(&xhci->er, timeout_us)) {
221 if (TRB_GET(TT, xhci->er.cur) == trb_type)
222 break;
224 xhci_handle_event(xhci);
226 return *timeout_us;
230 * Ref. xHCI Specification Revision 1.2, May 2019.
231 * Section 4.6.1.2.
233 * Process events from xHCI Abort command.
235 * Returns CC_COMMAND_RING_STOPPED on success and TIMEOUT on failure.
239 xhci_wait_for_command_aborted(xhci_t *const xhci, const trb_t *const address)
242 * Specification says that something might be seriously wrong, if
243 * we don't get a response after 5s. Still, let the caller decide,
244 * what to do then.
246 unsigned long timeout_us = USB_MAX_PROCESSING_TIME_US; /* 5s */
247 int cc = TIMEOUT;
249 * Expects two command completion events:
250 * The first with CC == COMMAND_ABORTED should point to address
251 * (not present if command was not running),
252 * the second with CC == COMMAND_RING_STOPPED should point to new dq.
254 while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
255 if ((xhci->er.cur->ptr_low == virt_to_phys(address)) &&
256 (xhci->er.cur->ptr_high == 0)) {
257 cc = TRB_GET(CC, xhci->er.cur);
258 xhci_advance_event_ring(xhci);
259 break;
262 xhci_handle_command_completion_event(xhci);
264 if (timeout_us == 0) {
265 xhci_debug("Warning: Timed out waiting for "
266 "COMMAND_ABORTED or COMMAND_RING_STOPPED.\n");
267 goto update_and_return;
269 if (cc == CC_COMMAND_RING_STOPPED) {
270 /* There may not have been a command to abort. */
271 goto update_and_return;
274 timeout_us = USB_MAX_PROCESSING_TIME_US; /* 5s */
275 while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
276 if (TRB_GET(CC, xhci->er.cur) == CC_COMMAND_RING_STOPPED) {
277 cc = CC_COMMAND_RING_STOPPED;
278 xhci_advance_event_ring(xhci);
279 break;
282 xhci_handle_command_completion_event(xhci);
284 if (timeout_us == 0)
285 xhci_debug("Warning: Timed out "
286 "waiting for COMMAND_RING_STOPPED.\n");
288 update_and_return:
289 xhci_update_event_dq(xhci);
290 return cc;
294 * returns cc of command in question (pointed to by `address`)
295 * caller should abort command if cc is TIMEOUT
298 xhci_wait_for_command_done(xhci_t *const xhci,
299 const trb_t *const address,
300 const int clear_event)
302 unsigned long timeout_us = USB_MAX_PROCESSING_TIME_US; /* 5s */
303 int cc = TIMEOUT;
304 while (xhci_wait_for_event_type(xhci, TRB_EV_CMD_CMPL, &timeout_us)) {
305 if ((xhci->er.cur->ptr_low == virt_to_phys(address)) &&
306 (xhci->er.cur->ptr_high == 0)) {
307 cc = TRB_GET(CC, xhci->er.cur);
308 break;
311 xhci_handle_command_completion_event(xhci);
313 if (!timeout_us) {
314 xhci_debug("Warning: Timed out waiting for TRB_EV_CMD_CMPL.\n");
315 } else if (clear_event) {
316 xhci_advance_event_ring(xhci);
318 xhci_update_event_dq(xhci);
319 return cc;
322 /* returns amount of bytes transferred on success, negative CC on error */
324 xhci_wait_for_transfer(xhci_t *const xhci, const int slot_id, const int ep_id)
326 xhci_spew("Waiting for transfer on ID %d EP %d\n", slot_id, ep_id);
327 /* 5s for all types of transfers */
328 unsigned long timeout_us = USB_MAX_PROCESSING_TIME_US;
329 int ret = TIMEOUT;
330 while (xhci_wait_for_event_type(xhci, TRB_EV_TRANSFER, &timeout_us)) {
331 if (TRB_GET(ID, xhci->er.cur) == slot_id &&
332 TRB_GET(EP, xhci->er.cur) == ep_id) {
333 ret = -TRB_GET(CC, xhci->er.cur);
334 if (ret == -CC_SUCCESS || ret == -CC_SHORT_PACKET)
335 ret = TRB_GET(EVTL, xhci->er.cur);
336 xhci_advance_event_ring(xhci);
337 break;
340 xhci_handle_transfer_event(xhci);
342 if (!timeout_us)
343 xhci_debug("Warning: Timed out waiting for TRB_EV_TRANSFER.\n");
344 xhci_update_event_dq(xhci);
345 return ret;