revert between 56095 -> 55830 in arch
[AROS.git] / rom / usb / pciusb / uhcichip.c
blob356035323554e552a5f9a58d77e481712476b599
1 /*
2 Copyright © 2010-2011, The AROS Development Team. All rights reserved
3 $Id$
4 */
6 #define DB_LEVEL 20
8 #include <proto/exec.h>
9 #include <proto/oop.h>
10 #include <hidd/pci.h>
12 #include <devices/usb_hub.h>
14 #include "uhwcmd.h"
16 #undef HiddPCIDeviceAttrBase
17 #define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
18 #undef HiddAttrBase
19 #define HiddAttrBase (hd->hd_HiddAB)
21 static AROS_INTH1(UhciResetHandler, struct PCIController *, hc)
23 AROS_INTFUNC_INIT
25 // stop controller and disable all interrupts
26 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
27 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
29 return FALSE;
31 AROS_INTFUNC_EXIT
34 void uhciFreeQContext(struct PCIController *hc, struct UhciQH *uqh) {
36 struct UhciTD *utd = NULL;
37 struct UhciTD *nextutd;
39 KPRINTF(5, ("Unlinking QContext %08lx\n", uqh));
40 // unlink from schedule
41 uqh->uqh_Pred->uxx_Link = uqh->uqh_Succ->uxx_Self;
42 SYNC;
44 uqh->uqh_Succ->uxx_Pred = uqh->uqh_Pred;
45 uqh->uqh_Pred->uxx_Succ = uqh->uqh_Succ;
46 SYNC;
48 nextutd = uqh->uqh_FirstTD;
49 while(nextutd)
51 KPRINTF(1, ("FreeTD %08lx\n", nextutd));
52 utd = nextutd;
53 nextutd = (struct UhciTD *) utd->utd_Succ;
54 uhciFreeTD(hc, utd);
56 uhciFreeQH(hc, uqh);
59 void uhciUpdateIntTree(struct PCIController *hc) {
61 struct UhciXX *uxx;
62 struct UhciXX *preduxx;
63 struct UhciXX *lastuseduxx;
64 UWORD cnt;
66 // optimize linkage between queue heads
67 preduxx = lastuseduxx = (struct UhciXX *) hc->hc_UhciCtrlQH; //hc->hc_UhciIsoTD;
68 for(cnt = 0; cnt < 9; cnt++)
70 uxx = (struct UhciXX *) hc->hc_UhciIntQH[cnt];
71 if(uxx->uxx_Succ != preduxx)
73 lastuseduxx = uxx->uxx_Succ;
75 uxx->uxx_Link = lastuseduxx->uxx_Self;
76 preduxx = uxx;
80 void uhciCheckPortStatusChange(struct PCIController *hc) {
82 struct PCIUnit *unit = hc->hc_Unit;
83 UWORD oldval;
84 UWORD hciport;
86 // check for port status change for UHCI and frame rollovers
88 for(hciport = 0; hciport < 2; hciport++)
90 UWORD portreg;
91 UWORD idx = hc->hc_PortNum20[hciport];
92 // don't pay attention to UHCI port changes when pwned by EHCI
93 if(!unit->hu_EhciOwned[idx])
95 portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
96 oldval = READIO16_LE(hc->hc_RegBase, portreg);
97 if(oldval & UHPF_ENABLECHANGE)
99 KPRINTF(10, ("Port %ld (%ld) Enable changed\n", idx, hciport));
100 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
102 if(oldval & UHPF_CONNECTCHANGE)
104 KPRINTF(10, ("Port %ld (%ld) Connect changed\n", idx, hciport));
105 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
106 if(!(oldval & UHPF_PORTCONNECTED))
108 if(unit->hu_PortMap20[idx])
110 KPRINTF(20, ("Transferring Port %ld back to EHCI\n", idx));
111 unit->hu_EhciOwned[idx] = TRUE;
115 if(oldval & UHPF_RESUMEDTX)
117 KPRINTF(10, ("Port %ld (%ld) Resume changed\n", idx, hciport));
118 hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
119 oldval &= ~UHPF_RESUMEDTX;
121 if(hc->hc_PortChangeMap[hciport])
123 unit->hu_RootPortChanges |= 1UL<<(idx+1);
124 /*KPRINTF(10, ("Port %ld (%ld) contributes %04lx to portmap %04lx\n",
125 idx, hciport, hc->hc_PortChangeMap[hciport], unit->hu_RootPortChanges));*/
127 WRITEIO16_LE(hc->hc_RegBase, portreg, oldval);
/*
 * uhciHandleFinishedTDs
 *
 * Completion pass over every pending request on hc_TDQueue. For each
 * request's queue head it decides whether the transfer has ended:
 * 'inspect' stays 0 while the QH is still active (or after a fatal TD
 * error has been recorded), becomes 1 when the QH went inactive or a
 * NAK timeout expired (the TD chain is then walked to count bytes and
 * map error bits), 2 when the element link hit UHCI_TERMINATE (all
 * bytes transferred, no walk needed), and 3 on a babble bail-out.
 * Finished requests get their bounce buffers released, their QH/TD
 * context freed, the per-endpoint data toggle updated, and are then
 * either requeued (fragmented transfers) or replied to the originator.
 */
132 void uhciHandleFinishedTDs(struct PCIController *hc) {
134 struct PCIUnit *unit = hc->hc_Unit;
135 struct IOUsbHWReq *ioreq;
136 struct IOUsbHWReq *nextioreq;
137 struct UhciQH *uqh;
138 struct UhciTD *utd;
139 struct UhciTD *nextutd;
140 UWORD devadrep;
141 ULONG len;
142 ULONG linkelem;
143 UWORD inspect;
144 BOOL shortpkt;
145 ULONG ctrlstatus;
146 ULONG nextctrlstatus = 0;
147 ULONG token = 0;
148 ULONG actual;
149 BOOL updatetree = FALSE;
150 BOOL fixsetupterm = FALSE;
152 KPRINTF(1, ("Checking for work done...\n"));
/* walk pending requests; fetch the successor first because a finished
   request is removed from the list below */
153 ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
154 while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
156 uqh = (struct UhciQH *) ioreq->iouh_DriverPrivate1;
157 if(uqh)
159 KPRINTF(1, ("Examining IOReq=%08lx with UQH=%08lx\n", ioreq, uqh));
160 linkelem = READMEM32_LE(&uqh->uqh_Element);
161 inspect = 0;
/* key into the per-endpoint tables: (devaddr * 32) + endpoint, +0x10 for IN */
162 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
163 if(linkelem & UHCI_TERMINATE)
165 KPRINTF(1, ("UQH terminated %08lx\n", linkelem));
166 inspect = 2;
167 } else {
168 utd = (struct UhciTD *) ((linkelem & UHCI_PTRMASK) - hc->hc_PCIVirtualAdjust - UHCI_STRUCTURE_OFFSET); // struct UhciTD starts 16/32 bytes before physical TD depending on architecture
169 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
170 nextutd = (struct UhciTD *)utd->utd_Succ;
171 if(!(ctrlstatus & UTCF_ACTIVE) && nextutd)
173 /* OK, it's not active. Does it look like it's done? Code copied from below.
174 If not done, check the next TD too. */
175 if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
178 Babble condition can only occur on the last data packet (or on the first if only one data packet is in the queue)
179 When UHCI encounters a babble condition it will halt immediately,
180 we can therefore just accept the data that has come through and resume as if we got interrupt on completition (IOC).
182 THEORETICAL: Possible fix for VIA babble bug
183 VIA chipset also halt the entire controller and sets the controller on stopped state.
184 We can resume the controller by changing the status bits in the queue so that the queue looks like it has ended with a completition or
185 remove the entire queue and act like it succeeded.
186 As VIA stops the controller we can then write a new frame list current index to point to the next item and then set the run bit back on.
188 nextutd = 0;
190 else
192 token = READMEM32_LE(&utd->utd_Token);
193 len = (ctrlstatus & UTSM_ACTUALLENGTH) >> UTSS_ACTUALLENGTH;
194 if((len != (token & UTTM_TRANSLENGTH) >> UTTS_TRANSLENGTH))
196 nextutd = 0;
199 if(nextutd)
201 nextctrlstatus = READMEM32_LE(&nextutd->utd_CtrlStatus);
204 /* Now, did the element link pointer change while we fetched the status for the pointed at TD?
205 If so, disregard the gathered information and assume still active. */
206 if(READMEM32_LE(&uqh->uqh_Element) != linkelem)
208 /* Oh well, probably still active */
209 KPRINTF(1, ("Link Element changed, still active.\n"));
211 else if(!(ctrlstatus & UTCF_ACTIVE) && (nextutd == 0 || !(nextctrlstatus & UTCF_ACTIVE)))
213 KPRINTF(1, ("CtrlStatus inactive %08lx\n", ctrlstatus));
214 inspect = 1;
216 else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
218 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
219 inspect = 1;
/* second stage: the transfer has ended in some way — tally bytes and errors */
222 fixsetupterm = FALSE;
223 if(inspect)
225 APTR data = &((UBYTE *)ioreq->iouh_Data)[ioreq->iouh_Actual];
226 shortpkt = FALSE;
227 if(inspect < 2) // if all went okay, don't traverse list, assume all bytes successfully transferred
229 utd = uqh->uqh_FirstTD;
230 actual = 0;
233 ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
234 if(ctrlstatus & UTCF_ACTIVE)
236 KPRINTF(20, ("Internal error! Still active?!\n"));
237 if(ctrlstatus & UTSF_BABBLE)
239 KPRINTF(200, ("HOST CONTROLLER IS DEAD!!!\n"));
240 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
241 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
242 inspect = 0;
243 break;
245 break;
247 token = READMEM32_LE(&utd->utd_Token);
248 KPRINTF(1, ("TD=%08lx CS=%08lx Token=%08lx\n", utd, ctrlstatus, token));
/* map TD error bits to io_Error; a fatal error clears inspect so the
   request is replied with the error below instead of being requeued */
249 if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
251 if(ctrlstatus & UTSF_BABBLE)
253 KPRINTF(20, ("Babble error %08lx/%08lx\n", ctrlstatus, token));
254 ctrlstatus &= ~(UTSF_BABBLE);
255 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
256 SYNC;
257 inspect = 3;
258 break;
260 else if(ctrlstatus & UTSF_CRCTIMEOUT)
262 KPRINTF(20, ("CRC/Timeout error IOReq=%08lx DIR=%ld\n", ioreq, ioreq->iouh_Dir));
263 if(ctrlstatus & UTSF_STALLED)
265 ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
266 } else {
267 ioreq->iouh_Req.io_Error = (ioreq->iouh_Dir == UHDIR_IN) ? UHIOERR_CRCERROR : UHIOERR_TIMEOUT;
270 else if(ctrlstatus & UTSF_STALLED)
272 KPRINTF(20, ("STALLED!\n"));
273 ioreq->iouh_Req.io_Error = UHIOERR_STALL;
275 else if(ctrlstatus & UTSF_BITSTUFFERR)
277 KPRINTF(20, ("Bitstuff error\n"));
278 ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
280 else if(ctrlstatus & UTSF_DATABUFFERERR)
282 KPRINTF(20, ("Databuffer error\n"));
283 ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
285 inspect = 0;
286 break;
288 if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]) && (ctrlstatus & UTSF_NAK))
290 ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
291 inspect = 0;
294 len = (ctrlstatus & UTSM_ACTUALLENGTH)>>UTSS_ACTUALLENGTH;
295 if((len != (token & UTTM_TRANSLENGTH)>>UTTS_TRANSLENGTH))
297 shortpkt = TRUE;
299 len = (len+1) & 0x7ff; // get real length
300 if((token & UTTM_PID)>>UTTS_PID != PID_SETUP) // don't count setup packet
302 actual += len;
303 // due to the VIA babble bug workaround, actually more bytes can
304 // be received than requested, limit the actual value to the upper limit
305 if(actual > uqh->uqh_Actual)
307 actual = uqh->uqh_Actual;
310 if(shortpkt)
312 break;
314 } while((utd = (struct UhciTD *) utd->utd_Succ));
315 if(inspect == 3)
317 /* bail out from babble */
318 actual = uqh->uqh_Actual;
320 if((actual < uqh->uqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
322 KPRINTF(10, ("Short packet: %ld < %ld\n", actual, ioreq->iouh_Length));
323 ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
325 } else {
326 KPRINTF(10, ("all %ld bytes transferred\n", uqh->uqh_Actual));
327 actual = uqh->uqh_Actual;
329 ioreq->iouh_Actual += actual;
330 // due to the short packet, the terminal of a setup packet has not been sent. Please do so.
331 if(shortpkt && (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER))
333 fixsetupterm = TRUE;
335 // this is actually no short packet but result of the VIA babble fix
336 if(shortpkt && (ioreq->iouh_Actual == ioreq->iouh_Length))
338 shortpkt = FALSE;
/* tear down: free the endpoint, detach the request from the pending
   list, release bounce buffers and the QH/TD context */
340 unit->hu_DevBusyReq[devadrep] = NULL;
341 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
342 if (uqh->uqh_DataBuffer)
344 UWORD dir;
345 if (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
346 dir = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT;
347 else
348 dir = ioreq->iouh_Dir;
350 usbReleaseBuffer(uqh->uqh_DataBuffer, data, actual, dir);
352 if (uqh->uqh_SetupBuffer)
353 usbReleaseBuffer(uqh->uqh_SetupBuffer, &ioreq->iouh_SetupData, sizeof(ioreq->iouh_SetupData), UHDIR_OUT);
354 uhciFreeQContext(hc, uqh);
355 if(ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
357 updatetree = TRUE;
/* inspect still set: no fatal error was recorded above */
359 if(inspect)
361 if(inspect < 2) // otherwise, toggle will be right already
363 // use next data toggle bit based on last successful transaction
364 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
366 if((!shortpkt && (ioreq->iouh_Actual < ioreq->iouh_Length)) || fixsetupterm)
368 // fragmented, do some more work
369 switch(ioreq->iouh_Req.io_Command)
371 case UHCMD_CONTROLXFER:
372 KPRINTF(10, ("Rescheduling CtrlTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
373 AddHead(&hc->hc_CtrlXFerQueue, (struct Node *) ioreq);
374 break;
376 case UHCMD_INTXFER:
377 KPRINTF(10, ("Rescheduling IntTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
378 AddHead(&hc->hc_IntXFerQueue, (struct Node *) ioreq);
379 break;
381 case UHCMD_BULKXFER:
382 KPRINTF(10, ("Rescheduling BulkTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
383 AddHead(&hc->hc_BulkXFerQueue, (struct Node *) ioreq);
384 break;
386 default:
387 KPRINTF(10, ("Uhm, internal error, dunno where to queue this req\n"));
388 ReplyMsg(&ioreq->iouh_Req.io_Message);
390 } else {
391 // check for successful clear feature and set address ctrl transfers
392 if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
394 uhwCheckSpecialCtrlTransfers(hc, ioreq);
396 ReplyMsg(&ioreq->iouh_Req.io_Message);
398 } else {
399 // be sure to save the data toggle bit where the error occurred
400 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
401 ReplyMsg(&ioreq->iouh_Req.io_Message);
404 } else {
405 KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));
407 ioreq = nextioreq;
/* interrupt transfers were removed — re-optimize the interrupt tree once */
409 if(updatetree)
411 KPRINTF(10, ("Updating Tree\n"));
412 uhciUpdateIntTree(hc);
/*
 * uhciScheduleCtrlTDs
 *
 * Moves queued control transfer requests from hc_CtrlXFerQueue onto the
 * hardware schedule. For each request whose endpoint is free it builds a
 * queue head with a SETUP TD (skipped/freed when continuing a fragmented
 * transfer), as many data-stage TDs as fit below UHCI_TD_CTRL_LIMIT, and
 * a terminating status TD (only when the data stage completes the whole
 * request — otherwise the last data TD gets the completion interrupt),
 * then links the QH directly behind the controller's Ctrl QH inside a
 * Disable()/Enable() pair. Stops early when QH/TD pools run dry and
 * retries on the next completion interrupt.
 */
416 void uhciScheduleCtrlTDs(struct PCIController *hc) {
418 struct PCIUnit *unit = hc->hc_Unit;
419 struct IOUsbHWReq *ioreq;
420 UWORD devadrep;
421 struct UhciQH *uqh;
422 struct UhciTD *setuputd;
423 struct UhciTD *datautd;
424 struct UhciTD *termutd;
425 struct UhciTD *predutd;
426 ULONG actual;
427 ULONG ctrlstatus;
428 ULONG token;
429 ULONG len;
430 ULONG phyaddr;
431 BOOL cont;
433 /* *** CTRL Transfers *** */
434 KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
435 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
436 while(((struct Node *) ioreq)->ln_Succ)
/* control endpoints use the same key for both directions (no +0x10) */
438 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
439 KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
440 /* is endpoint already in use or do we have to wait for next transaction */
441 if(unit->hu_DevBusyReq[devadrep])
443 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
444 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
445 continue;
448 uqh = uhciAllocQH(hc);
449 if(!uqh)
451 break;
454 setuputd = uhciAllocTD(hc);
455 if(!setuputd)
457 uhciFreeQH(hc, uqh);
458 break;
460 termutd = uhciAllocTD(hc);
461 if(!termutd)
463 uhciFreeTD(hc, setuputd);
464 uhciFreeQH(hc, uqh);
465 break;
467 uqh->uqh_IOReq = ioreq;
469 //termutd->utd_QueueHead = setuputd->utd_QueueHead = uqh;
471 KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setuputd, termutd));
473 // fill setup td
474 ctrlstatus = UTCF_ACTIVE|UTCF_3ERRORSLIMIT;
475 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
477 KPRINTF(5, ("*** LOW SPEED ***\n"));
478 ctrlstatus |= UTCF_LOWSPEED;
480 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
481 //setuputd->utd_Pred = NULL;
482 if(ioreq->iouh_Actual)
484 // this is a continuation of a fragmented ctrl transfer!
485 KPRINTF(1, ("Continuing FRAGMENT at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
486 cont = TRUE;
487 } else {
488 cont = FALSE;
489 uqh->uqh_FirstTD = setuputd;
490 uqh->uqh_Element = setuputd->utd_Self; // start of queue
491 uqh->uqh_SetupBuffer = usbGetBuffer(&ioreq->iouh_SetupData, sizeof(ioreq->iouh_SetupData), UHDIR_OUT);
492 WRITEMEM32_LE(&setuputd->utd_CtrlStatus, ctrlstatus);
/* SETUP packets are always 8 bytes (encoded as 7) with DATA0 toggle */
493 WRITEMEM32_LE(&setuputd->utd_Token, (PID_SETUP<<UTTS_PID)|token|(7<<UTTS_TRANSLENGTH)|UTTF_DATA0);
494 WRITEMEM32_LE(&setuputd->utd_BufferPtr, (ULONG) (IPTR) pciGetPhysical(hc, uqh->uqh_SetupBuffer));
/* data stage direction is given by the setup packet's bmRequestType */
497 token |= (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? PID_IN : PID_OUT;
498 predutd = setuputd;
499 actual = ioreq->iouh_Actual;
501 if(ioreq->iouh_Length - actual)
503 ctrlstatus |= UTCF_SHORTPACKET;
504 if(cont)
506 if(!unit->hu_DevDataToggle[devadrep])
508 // continue with data toggle 0
509 token |= UTTF_DATA1;
511 } else {
512 ioreq->iouh_Actual=0;
514 uqh->uqh_DataBuffer = usbGetBuffer(&(((UBYTE *)ioreq->iouh_Data)[ioreq->iouh_Actual]), ioreq->iouh_Length - actual, (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT);
515 phyaddr = (ULONG)(IPTR)pciGetPhysical(hc, uqh->uqh_DataBuffer);
/* chain data TDs, at most MaxPktSize bytes each, up to UHCI_TD_CTRL_LIMIT */
518 datautd = uhciAllocTD(hc);
519 if(!datautd)
521 break;
523 token ^= UTTF_DATA1; // toggle bit
524 predutd->utd_Link = datautd->utd_Self;
525 predutd->utd_Succ = (struct UhciXX *) datautd;
526 //datautd->utd_Pred = (struct UhciXX *) predutd;
527 //datautd->utd_QueueHead = uqh;
528 len = ioreq->iouh_Length - actual;
529 if(len > ioreq->iouh_MaxPktSize)
531 len = ioreq->iouh_MaxPktSize;
533 WRITEMEM32_LE(&datautd->utd_CtrlStatus, ctrlstatus);
534 WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking need here as len is always >= 1
535 WRITEMEM32_LE(&datautd->utd_BufferPtr, phyaddr);
536 phyaddr += len;
537 actual += len;
538 predutd = datautd;
539 } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_CTRL_LIMIT));
540 if(actual == ioreq->iouh_Actual)
542 // not at least one data TD? try again later
543 uhciFreeTD(hc, setuputd);
544 uhciFreeTD(hc, termutd);
545 uhciFreeQH(hc, uqh);
546 break;
548 if(cont)
550 // free Setup packet
551 KPRINTF(1, ("Freeing setup\n"));
552 uqh->uqh_FirstTD = (struct UhciTD *) setuputd->utd_Succ;
553 //uqh->uqh_FirstTD->utd_Pred = NULL;
554 uqh->uqh_Element = setuputd->utd_Succ->uxx_Self; // start of queue after setup packet
555 uhciFreeTD(hc, setuputd);
556 // set toggle for next batch
557 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
559 } else {
560 if(cont)
562 // free Setup packet, assign termination as first packet (no data)
563 KPRINTF(1, ("Freeing setup (term only)\n"));
564 uqh->uqh_FirstTD = (struct UhciTD *) termutd;
565 uqh->uqh_Element = termutd->utd_Self; // start of queue after setup packet
566 uhciFreeTD(hc, setuputd);
567 predutd = NULL;
570 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
571 ctrlstatus |= UTCF_READYINTEN;
572 if(actual == ioreq->iouh_Length)
574 // TERM packet
575 KPRINTF(1, ("Activating TERM\n"));
/* status stage: always DATA1 and the opposite direction of the data stage */
576 token |= UTTF_DATA1;
577 token ^= (PID_IN^PID_OUT)<<UTTS_PID;
579 if(predutd)
581 predutd->utd_Link = termutd->utd_Self;
582 predutd->utd_Succ = (struct UhciXX *) termutd;
584 //termutd->utd_Pred = (struct UhciXX *) predutd;
585 WRITEMEM32_LE(&termutd->utd_CtrlStatus, ctrlstatus);
586 WRITEMEM32_LE(&termutd->utd_Token, token|(0x7ff<<UTTS_TRANSLENGTH));
587 CONSTWRITEMEM32_LE(&termutd->utd_Link, UHCI_TERMINATE);
588 termutd->utd_Succ = NULL;
589 //uqh->uqh_LastTD = termutd;
590 } else {
591 KPRINTF(1, ("Setup data phase fragmented\n"));
592 // don't create TERM, we don't know the final data toggle bit
593 // but mark the last data TD for interrupt generation
594 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
595 uhciFreeTD(hc, termutd);
596 CONSTWRITEMEM32_LE(&predutd->utd_Link, UHCI_TERMINATE);
597 predutd->utd_Succ = NULL;
598 //uqh->uqh_LastTD = predutd;
601 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
602 ioreq->iouh_DriverPrivate1 = uqh;
604 // manage endpoint going busy
605 unit->hu_DevBusyReq[devadrep] = ioreq;
606 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
608 Disable();
609 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
611 // looks good to me, now enqueue this entry (just behind the CtrlQH)
612 uqh->uqh_Succ = hc->hc_UhciCtrlQH->uqh_Succ;
613 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
614 SYNC;
616 uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciCtrlQH;
617 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
618 hc->hc_UhciCtrlQH->uqh_Succ = (struct UhciXX *) uqh;
619 hc->hc_UhciCtrlQH->uqh_Link = uqh->uqh_Self;
620 SYNC;
621 Enable();
623 ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
/*
 * uhciScheduleIntTDs
 *
 * Moves queued interrupt transfer requests from hc_IntXFerQueue onto
 * the hardware schedule: builds a queue head with up to
 * UHCI_TD_INT_LIMIT bytes worth of depth-first-linked TDs, picks the
 * interrupt tree level matching the request's poll interval (roughly
 * 2^level ms, capped at the 256ms level), links the QH behind that
 * level's QH inside a Disable()/Enable() pair and re-optimizes the
 * interrupt tree linkage.
 */
627 void uhciScheduleIntTDs(struct PCIController *hc) {
629 struct PCIUnit *unit = hc->hc_Unit;
630 struct IOUsbHWReq *ioreq;
631 UWORD cnt;
632 UWORD devadrep;
633 struct UhciQH *uqh;
634 struct UhciQH *intuqh;
635 struct UhciTD *utd;
636 struct UhciTD *predutd;
637 ULONG actual;
638 ULONG ctrlstatus;
639 ULONG token;
640 ULONG len;
641 ULONG phyaddr;
643 /* *** INT Transfers *** */
644 KPRINTF(1, ("Scheduling new INT transfers...\n"));
645 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
646 while(((struct Node *) ioreq)->ln_Succ)
/* key into the per-endpoint tables: (devaddr * 32) + endpoint, +0x10 for IN */
648 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
649 KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
650 /* is endpoint already in use or do we have to wait for next transaction */
651 if(unit->hu_DevBusyReq[devadrep])
653 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
654 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
655 continue;
658 uqh = uhciAllocQH(hc);
659 if(!uqh)
661 break;
664 uqh->uqh_IOReq = ioreq;
666 ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT|UTCF_SHORTPACKET;
667 if(ioreq->iouh_Flags & UHFF_LOWSPEED)
669 KPRINTF(5, ("*** LOW SPEED ***\n"));
670 ctrlstatus |= UTCF_LOWSPEED;
672 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
673 token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;
674 predutd = NULL;
675 actual = ioreq->iouh_Actual;
676 uqh->uqh_DataBuffer = usbGetBuffer(&(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]), ioreq->iouh_Length - actual, ioreq->iouh_Dir);
677 phyaddr = (ULONG) (IPTR) pciGetPhysical(hc, uqh->uqh_DataBuffer);
/* resume the endpoint's data toggle sequence where it left off */
678 if(unit->hu_DevDataToggle[devadrep])
680 // continue with data toggle 1
681 KPRINTF(1, ("Data1\n"));
682 token |= UTTF_DATA1;
683 } else {
684 KPRINTF(1, ("Data0\n"));
/* chain TDs, at most MaxPktSize bytes each, up to UHCI_TD_INT_LIMIT */
688 utd = uhciAllocTD(hc);
689 if(!utd)
691 break;
693 if(predutd)
/* depth-first link so the whole QH is worked in one frame if possible */
695 WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(&utd->utd_Self)|UHCI_DFS);
696 predutd->utd_Succ = (struct UhciXX *) utd;
697 //utd->utd_Pred = (struct UhciXX *) predutd;
698 } else {
699 uqh->uqh_FirstTD = utd;
700 uqh->uqh_Element = utd->utd_Self;
701 //utd->utd_Pred = NULL;
703 //utd->utd_QueueHead = uqh;
704 len = ioreq->iouh_Length - actual;
705 if(len > ioreq->iouh_MaxPktSize)
707 len = ioreq->iouh_MaxPktSize;
710 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
711 WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
712 WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
713 phyaddr += len;
714 actual += len;
715 predutd = utd;
716 token ^= UTTF_DATA1; // toggle bit
717 } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_INT_LIMIT));
719 if(!utd)
721 // not at least one data TD? try again later
722 uhciFreeQH(hc, uqh);
723 break;
726 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
727 // set toggle for next batch / successful transfer
728 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
729 if(unit->hu_DevDataToggle[devadrep])
731 // continue with data toggle 1
732 KPRINTF(1, ("NewData1\n"));
733 } else {
734 KPRINTF(1, ("NewData0\n"));
/* last TD: raise the completion interrupt and terminate the chain */
736 ctrlstatus |= UTCF_READYINTEN;
737 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
738 CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
739 utd->utd_Succ = NULL;
740 //uqh->uqh_LastTD = utd;
/* pick the interrupt tree level whose period covers iouh_Interval */
742 if(ioreq->iouh_Interval >= 255)
744 intuqh = hc->hc_UhciIntQH[8]; // 256ms interval
745 } else {
746 cnt = 0;
749 intuqh = hc->hc_UhciIntQH[cnt++];
750 } while(ioreq->iouh_Interval >= (1<<cnt));
751 KPRINTF(1, ("Scheduled at level %ld\n", cnt));
754 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
755 ioreq->iouh_DriverPrivate1 = uqh;
757 // manage endpoint going busy
758 unit->hu_DevBusyReq[devadrep] = ioreq;
759 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
761 Disable();
762 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
764 // looks good to me, now enqueue this entry (just behind the right IntQH)
765 uqh->uqh_Succ = intuqh->uqh_Succ;
766 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
767 SYNC;
769 uqh->uqh_Pred = (struct UhciXX *) intuqh;
770 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
771 intuqh->uqh_Succ = (struct UhciXX *) uqh;
772 intuqh->uqh_Link = uqh->uqh_Self;
773 SYNC;
774 Enable();
776 uhciUpdateIntTree(hc);
778 ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
/*
 * uhciScheduleBulkTDs
 *
 * Moves queued bulk transfer requests from hc_BulkXFerQueue onto the
 * hardware schedule: builds a queue head with up to UHCI_TD_BULK_LIMIT
 * bytes of depth-first-linked TDs, appends a zero-length packet when an
 * OUT transfer ends exactly on a packet boundary and UHFF_NOSHORTPKT is
 * not set, then links the QH behind the controller's Bulk QH inside a
 * Disable()/Enable() pair. Stops early when QH/TD pools run dry and
 * retries on the next completion interrupt.
 */
782 void uhciScheduleBulkTDs(struct PCIController *hc) {
784 struct PCIUnit *unit = hc->hc_Unit;
785 struct IOUsbHWReq *ioreq;
786 UWORD devadrep;
787 struct UhciQH *uqh;
788 struct UhciTD *utd;
789 struct UhciTD *predutd;
790 ULONG actual;
791 ULONG ctrlstatus;
792 ULONG token;
793 ULONG len;
794 ULONG phyaddr;
795 BOOL forcezero;
797 /* *** BULK Transfers *** */
798 KPRINTF(1, ("Scheduling new BULK transfers...\n"));
799 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
800 while(((struct Node *) ioreq)->ln_Succ)
/* key into the per-endpoint tables: (devaddr * 32) + endpoint, +0x10 for IN */
802 devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
803 KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
804 /* is endpoint already in use or do we have to wait for next transaction */
805 if(unit->hu_DevBusyReq[devadrep])
807 KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
808 ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
809 continue;
812 uqh = uhciAllocQH(hc);
813 if(!uqh)
815 break;
818 uqh->uqh_IOReq = ioreq;
820 // fill setup td
821 ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT|UTCF_SHORTPACKET;
822 token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
823 token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;
824 predutd = NULL;
825 actual = ioreq->iouh_Actual;
827 // Get a MEMF_31BIT bounce buffer
828 uqh->uqh_DataBuffer = usbGetBuffer(&(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]), ioreq->iouh_Length - actual, ioreq->iouh_Dir);
829 phyaddr = (IPTR)pciGetPhysical(hc, uqh->uqh_DataBuffer);
/* resume the endpoint's data toggle sequence where it left off */
830 if(unit->hu_DevDataToggle[devadrep])
832 // continue with data toggle 1
833 token |= UTTF_DATA1;
/* chain TDs, at most MaxPktSize bytes each, up to UHCI_TD_BULK_LIMIT */
837 utd = uhciAllocTD(hc);
838 if(!utd)
840 break;
842 forcezero = FALSE;
843 if(predutd)
/* depth-first link so the whole QH is worked in one frame if possible */
845 WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(&utd->utd_Self)|UHCI_DFS);
846 predutd->utd_Succ = (struct UhciXX *) utd;
847 //utd->utd_Pred = (struct UhciXX *) predutd;
848 } else {
849 uqh->uqh_FirstTD = utd;
850 uqh->uqh_Element = utd->utd_Self;
851 //utd->utd_Pred = NULL;
853 //utd->utd_QueueHead = uqh;
854 len = ioreq->iouh_Length - actual;
855 if(len > ioreq->iouh_MaxPktSize)
857 len = ioreq->iouh_MaxPktSize;
859 WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
860 WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
861 WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
862 phyaddr += len;
863 actual += len;
864 predutd = utd;
865 token ^= UTTF_DATA1; // toggle bit
/* transfer complete in this batch: decide about a trailing zero-length packet */
866 if((actual == ioreq->iouh_Length) && len)
868 if((ioreq->iouh_Flags & UHFF_NOSHORTPKT) || (ioreq->iouh_Dir == UHDIR_IN) || (actual % ioreq->iouh_MaxPktSize))
870 // no last zero byte packet
871 break;
872 } else {
873 // avoid rare case that the zero byte packet is reached on TD_BULK_LIMIT
874 forcezero = TRUE;
877 } while(forcezero || (len && (actual <= ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_BULK_LIMIT)));
879 if(!utd)
881 // not at least one data TD? try again later
882 uhciFreeQH(hc, uqh);
883 break;
885 uqh->uqh_Actual = actual - ioreq->iouh_Actual;
886 // set toggle for next batch / successful transfer
887 unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
/* last TD: raise the completion interrupt and terminate the chain */
889 ctrlstatus |= UTCF_READYINTEN;
890 WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
891 CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
892 utd->utd_Succ = NULL;
893 //uqh->uqh_LastTD = utd;
895 Remove(&ioreq->iouh_Req.io_Message.mn_Node);
896 ioreq->iouh_DriverPrivate1 = uqh;
898 // manage endpoint going busy
899 unit->hu_DevBusyReq[devadrep] = ioreq;
900 unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;
902 Disable();
903 AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);
905 // looks good to me, now enqueue this entry (just behind the BulkQH)
906 uqh->uqh_Succ = hc->hc_UhciBulkQH->uqh_Succ;
907 uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
908 SYNC;
910 uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciBulkQH;
911 uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
912 hc->hc_UhciBulkQH->uqh_Succ = (struct UhciXX *) uqh;
913 hc->hc_UhciBulkQH->uqh_Link = uqh->uqh_Self;
914 SYNC;
915 Enable();
917 ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
921 void uhciUpdateFrameCounter(struct PCIController *hc) {
923 UWORD framecnt;
924 Disable();
925 framecnt = READIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT) & 0x07ff;
926 if(framecnt < (hc->hc_FrameCounter & 0x07ff))
928 hc->hc_FrameCounter |= 0x07ff;
929 hc->hc_FrameCounter++;
930 hc->hc_FrameCounter |= framecnt;
931 KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
932 } else {
933 hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xfffff800)|framecnt;
935 Enable();
938 static AROS_INTH1(uhciCompleteInt, struct PCIController *,hc)
940 AROS_INTFUNC_INIT
942 KPRINTF(1, ("CompleteInt!\n"));
943 uhciUpdateFrameCounter(hc);
945 /* **************** PROCESS DONE TRANSFERS **************** */
947 uhciCheckPortStatusChange(hc);
948 uhwCheckRootHubChanges(hc->hc_Unit);
950 uhciHandleFinishedTDs(hc);
952 if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
954 uhciScheduleCtrlTDs(hc);
957 if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
959 uhciScheduleIntTDs(hc);
962 if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
964 uhciScheduleBulkTDs(hc);
967 KPRINTF(1, ("CompleteDone\n"));
969 return FALSE;
971 AROS_INTFUNC_EXIT
974 static AROS_INTH1(uhciIntCode, struct PCIController *, hc)
976 AROS_INTFUNC_INIT
978 struct PCIDevice *base = hc->hc_Device;
979 UWORD intr;
981 KPRINTF(10, ("pciUhciInt()\n"));
982 intr = READIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS);
983 if(intr & (UHSF_USBINT|UHSF_USBERRORINT|UHSF_RESUMEDTX|UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED))
985 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS, intr);
986 KPRINTF(1, ("INT=%04lx\n", intr));
987 if(intr & (UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED))
989 KPRINTF(200, ("Host ERROR!\n"));
990 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_GLOBALRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE);
991 //WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
993 if (!(hc->hc_Flags & HCF_ONLINE))
995 return FALSE;
997 if(intr & (UHSF_USBINT|UHSF_USBERRORINT))
999 SureCause(base, &hc->hc_CompleteInt);
1003 return FALSE;
1005 AROS_INTFUNC_EXIT
1008 BOOL uhciInit(struct PCIController *hc, struct PCIUnit *hu) {
1010 struct PCIDevice *hd = hu->hu_Device;
1012 struct UhciQH *uqh;
1013 struct UhciQH *preduqh;
1014 struct UhciTD *utd;
1015 ULONG *tabptr;
1016 UBYTE *memptr;
1017 ULONG bitcnt;
1019 ULONG cnt;
1021 struct TagItem pciActivateIO[] =
1023 { aHidd_PCIDevice_isIO, TRUE },
1024 { TAG_DONE, 0UL },
1027 struct TagItem pciActivateBusmaster[] =
1029 { aHidd_PCIDevice_isMaster, TRUE },
1030 { TAG_DONE, 0UL },
1033 struct TagItem pciDeactivateBusmaster[] =
1035 { aHidd_PCIDevice_isMaster, FALSE },
1036 { TAG_DONE, 0UL },
1039 hc->hc_NumPorts = 2; // UHCI always uses 2 ports per controller
1040 KPRINTF(20, ("Found UHCI Controller %08lx FuncNum=%ld with %ld ports\n", hc->hc_PCIDeviceObject, hc->hc_FunctionNum, hc->hc_NumPorts));
1041 hc->hc_CompleteInt.is_Node.ln_Type = NT_INTERRUPT;
1042 hc->hc_CompleteInt.is_Node.ln_Name = "UHCI CompleteInt";
1043 hc->hc_CompleteInt.is_Node.ln_Pri = 0;
1044 hc->hc_CompleteInt.is_Data = hc;
1045 hc->hc_CompleteInt.is_Code = (VOID_FUNC)uhciCompleteInt;
1047 hc->hc_PCIMemSize = sizeof(ULONG) * UHCI_FRAMELIST_SIZE + UHCI_FRAMELIST_ALIGNMENT + 1;
1048 hc->hc_PCIMemSize += sizeof(struct UhciQH) * UHCI_QH_POOLSIZE;
1049 hc->hc_PCIMemSize += sizeof(struct UhciTD) * UHCI_TD_POOLSIZE;
1051 memptr = HIDD_PCIDriver_AllocPCIMem(hc->hc_PCIDriverObject, hc->hc_PCIMemSize);
1052 /* memptr will be in the MEMF_31BIT type, therefore
1053 * we know that it's *physical address* will be 32 bits or
1054 * less, which is required for UHCI operation
1056 hc->hc_PCIMem = (APTR) memptr;
1057 if(memptr) {
1059 // PhysicalAddress - VirtualAdjust = VirtualAddress
1060 // VirtualAddress + VirtualAdjust = PhysicalAddress
1061 hc->hc_PCIVirtualAdjust = (IPTR)pciGetPhysical(hc, memptr) - (IPTR)memptr;
1062 KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc->hc_PCIVirtualAdjust));
1064 // align memory
1065 memptr = (UBYTE *) ((((IPTR) hc->hc_PCIMem) + UHCI_FRAMELIST_ALIGNMENT) & (~UHCI_FRAMELIST_ALIGNMENT));
1066 hc->hc_UhciFrameList = (ULONG *) memptr;
1067 KPRINTF(10, ("FrameListBase 0x%08lx\n", hc->hc_UhciFrameList));
1068 memptr += sizeof(APTR) * UHCI_FRAMELIST_SIZE;
1070 // build up QH pool
1071 // Again, all the UQHs are in the MEMF_31BIT hc_PCIMem pool,
1072 // so we can safely treat their physical addresses as 32 bit pointers
1073 uqh = (struct UhciQH *) memptr;
1074 hc->hc_UhciQHPool = uqh;
1075 cnt = UHCI_QH_POOLSIZE - 1;
1076 do {
1077 // minimal initalization
1078 uqh->uqh_Succ = (struct UhciXX *) (uqh + 1);
1079 WRITEMEM32_LE(&uqh->uqh_Self, (ULONG) ((IPTR)(&uqh->uqh_Link) + hc->hc_PCIVirtualAdjust + UHCI_QHSELECT));
1080 uqh++;
1081 } while(--cnt);
1082 uqh->uqh_Succ = NULL;
1083 WRITEMEM32_LE(&uqh->uqh_Self, (ULONG) ((IPTR)(&uqh->uqh_Link) + hc->hc_PCIVirtualAdjust + UHCI_QHSELECT));
1084 memptr += sizeof(struct UhciQH) * UHCI_QH_POOLSIZE;
1086 // build up TD pool
1087 // Again, all the UTDs are in the MEMF_31BIT hc_PCIMem pool,
1088 // so we can safely treat their physical addresses as 32 bit pointers
1089 utd = (struct UhciTD *) memptr;
1090 hc->hc_UhciTDPool = utd;
1091 cnt = UHCI_TD_POOLSIZE - 1;
1092 do {
1093 utd->utd_Succ = (struct UhciXX *) (utd + 1);
1094 WRITEMEM32_LE(&utd->utd_Self, (ULONG) ((IPTR)(&utd->utd_Link) + hc->hc_PCIVirtualAdjust + UHCI_TDSELECT));
1095 utd++;
1096 } while(--cnt);
1097 utd->utd_Succ = NULL;
1098 WRITEMEM32_LE(&utd->utd_Self, (ULONG) ((IPTR)(&utd->utd_Link) + hc->hc_PCIVirtualAdjust + UHCI_TDSELECT));
1099 memptr += sizeof(struct UhciTD) * UHCI_TD_POOLSIZE;
1101 // terminating QH
1102 hc->hc_UhciTermQH = preduqh = uqh = uhciAllocQH(hc);
1103 uqh->uqh_Succ = NULL;
1104 CONSTWRITEMEM32_LE(&uqh->uqh_Link, UHCI_TERMINATE);
1105 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1107 // dummy Bulk QH
1108 hc->hc_UhciBulkQH = uqh = uhciAllocQH(hc);
1109 uqh->uqh_Succ = (struct UhciXX *) preduqh;
1110 preduqh->uqh_Pred = (struct UhciXX *) uqh;
1111 uqh->uqh_Link = preduqh->uqh_Self; // link to terminating QH
1112 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1113 preduqh = uqh;
1115 // dummy Ctrl QH
1116 hc->hc_UhciCtrlQH = uqh = uhciAllocQH(hc);
1117 uqh->uqh_Succ = (struct UhciXX *) preduqh;
1118 preduqh->uqh_Pred = (struct UhciXX *) uqh;
1119 uqh->uqh_Link = preduqh->uqh_Self; // link to Bulk QH
1120 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1122 // dummy ISO TD
1123 hc->hc_UhciIsoTD = utd = uhciAllocTD(hc);
1124 utd->utd_Succ = (struct UhciXX *) uqh;
1125 //utd->utd_Pred = NULL; // no certain linkage above this level
1126 uqh->uqh_Pred = (struct UhciXX *) utd;
1127 utd->utd_Link = uqh->uqh_Self; // link to Ctrl QH
1129 CONSTWRITEMEM32_LE(&utd->utd_CtrlStatus, 0);
1131 // 1 ms INT QH
1132 hc->hc_UhciIntQH[0] = uqh = uhciAllocQH(hc);
1133 uqh->uqh_Succ = (struct UhciXX *) utd;
1134 uqh->uqh_Pred = NULL; // who knows...
1135 //uqh->uqh_Link = utd->utd_Self; // link to ISO
1136 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1137 preduqh = uqh;
1139 // make 9 levels of QH interrupts
1140 for(cnt = 1; cnt < 9; cnt++) {
1141 hc->hc_UhciIntQH[cnt] = uqh = uhciAllocQH(hc);
1142 uqh->uqh_Succ = (struct UhciXX *) preduqh;
1143 uqh->uqh_Pred = NULL; // who knows...
1144 //uqh->uqh_Link = preduqh->uqh_Self; // link to previous int level
1145 CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
1146 preduqh = uqh;
1149 uhciUpdateIntTree(hc);
1151 // fill in framelist with IntQH entry points based on interval
1152 tabptr = hc->hc_UhciFrameList;
1153 for(cnt = 0; cnt < UHCI_FRAMELIST_SIZE; cnt++) {
1154 uqh = hc->hc_UhciIntQH[8];
1155 bitcnt = 0;
1156 do {
1157 if(cnt & (1UL<<bitcnt)) {
1158 uqh = hc->hc_UhciIntQH[bitcnt];
1159 break;
1161 } while(++bitcnt < 9);
1162 *tabptr++ = uqh->uqh_Self;
1165 // this will cause more PCI memory access, but faster USB transfers as well
1166 //WRITEMEM32_LE(&hc->hc_UhciTermQH->uqh_Link, AROS_LONG2LE(hc->hc_UhciBulkQH->uqh_Self));
1168 // time to initialize hardware...
1169 OOP_GetAttr(hc->hc_PCIDeviceObject, aHidd_PCIDevice_Base4, (IPTR *) &hc->hc_RegBase);
1170 hc->hc_RegBase = (APTR) (((IPTR) hc->hc_RegBase) & (~0xf));
1171 KPRINTF(10, ("RegBase = 0x%08lx\n", hc->hc_RegBase));
1172 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateIO);
1174 // disable BIOS legacy support
1175 KPRINTF(10, ("Turning off BIOS legacy support (old value=%04lx)\n", PCIXReadConfigWord(hc, UHCI_USBLEGSUP)));
1176 PCIXWriteConfigWord(hc, UHCI_USBLEGSUP, 0x8f00);
1178 KPRINTF(10, ("Resetting UHCI HC\n"));
1179 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_GLOBALRESET);
1180 uhwDelayMS(15, hu);
1182 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciDeactivateBusmaster); // no busmaster yet
1184 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
1185 cnt = 100;
1186 do {
1187 uhwDelayMS(10, hu);
1188 if(!(READIO16_LE(hc->hc_RegBase, UHCI_USBCMD) & UHCF_HCRESET)) {
1189 break;
1191 } while(--cnt);
1193 if(cnt == 0) {
1194 KPRINTF(20, ("Reset Timeout!\n"));
1195 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
1196 uhwDelayMS(15, hu);
1197 } else {
1198 KPRINTF(20, ("Reset finished after %ld ticks\n", 100-cnt));
1201 // stop controller and disable all interrupts first
1202 KPRINTF(10, ("Stopping controller and enabling busmaster\n"));
1203 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
1204 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
1206 OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateBusmaster); // enable busmaster
1208 // Fix for VIA Babble problem
1209 cnt = PCIXReadConfigByte(hc, 0x40);
1210 if(!(cnt & 0x40)) {
1211 KPRINTF(20, ("Applying VIA Babble workaround\n"));
1212 PCIXWriteConfigByte(hc, 0x40, cnt|0x40);
1215 KPRINTF(10, ("Configuring UHCI HC\n"));
1216 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE);
1218 WRITEIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT, 0);
1220 /* hc->hc_UhciFrameList points to a portion of hc->hc_PciMem,
1221 * which we know is 32 bit
1223 WRITEIO32_LE(hc->hc_RegBase, UHCI_FRAMELISTADDR, (ULONG)(IPTR)pciGetPhysical(hc, hc->hc_UhciFrameList));
1225 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS, UHSF_USBINT | UHSF_USBERRORINT | UHSF_RESUMEDTX | UHSF_HCSYSERROR | UHSF_HCPROCERROR | UHSF_HCHALTED);
1227 // install reset handler
1228 hc->hc_ResetInt.is_Code = (VOID_FUNC)UhciResetHandler;
1229 hc->hc_ResetInt.is_Data = hc;
1230 AddResetCallback(&hc->hc_ResetInt);
1232 // add interrupt
1233 hc->hc_PCIIntHandler.is_Node.ln_Name = "UHCI PCI (pciusb.device)";
1234 hc->hc_PCIIntHandler.is_Node.ln_Pri = 5;
1235 hc->hc_PCIIntHandler.is_Node.ln_Type = NT_INTERRUPT;
1236 hc->hc_PCIIntHandler.is_Code = (VOID_FUNC)uhciIntCode;
1237 hc->hc_PCIIntHandler.is_Data = hc;
1238 PCIXAddInterrupt(hc, &hc->hc_PCIIntHandler);
1240 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, UHIF_TIMEOUTCRC|UHIF_INTONCOMPLETE|UHIF_SHORTPACKET);
1242 // clear all port bits (both ports)
1243 WRITEIO32_LE(hc->hc_RegBase, UHCI_PORT1STSCTRL, 0);
1245 // enable PIRQ
1246 KPRINTF(10, ("Enabling PIRQ (old value=%04lx)\n", PCIXReadConfigWord(hc, UHCI_USBLEGSUP)));
1247 PCIXWriteConfigWord(hc, UHCI_USBLEGSUP, 0x2000);
1249 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
1250 SYNC;
1252 KPRINTF(20, ("HW Init done\n"));
1254 KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_USBCMD)));
1255 KPRINTF(10, ("HW Regs USBSTS=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS)));
1256 KPRINTF(10, ("HW Regs FRAMECOUNT=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT)));
1258 KPRINTF(20, ("uhciInit returns TRUE...\n"));
1259 return TRUE;
1263 FIXME: What would the appropriate debug level be?
1265 KPRINTF(1000, ("uhciInit returns FALSE...\n"));
1266 return FALSE;
1269 void uhciFree(struct PCIController *hc, struct PCIUnit *hu) {
1271 hc = (struct PCIController *) hu->hu_Controllers.lh_Head;
1272 while(hc->hc_Node.ln_Succ)
1274 switch(hc->hc_HCIType)
1276 case HCITYPE_UHCI:
1278 KPRINTF(20, ("Shutting down UHCI %08lx\n", hc));
1279 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
1280 // disable PIRQ
1281 PCIXWriteConfigWord(hc, UHCI_USBLEGSUP, 0);
1282 // disable all ports
1283 WRITEIO32_LE(hc->hc_RegBase, UHCI_PORT1STSCTRL, 0);
1284 uhwDelayMS(50, hu);
1285 //WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE);
1286 //uhwDelayMS(50, hu);
1287 KPRINTF(20, ("Stopping UHCI %08lx\n", hc));
1288 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
1289 SYNC;
1291 //KPRINTF(20, ("Reset done UHCI %08lx\n", hc));
1292 uhwDelayMS(10, hu);
1294 KPRINTF(20, ("Resetting UHCI %08lx\n", hc));
1295 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
1296 SYNC;
1298 uhwDelayMS(50, hu);
1299 WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
1300 SYNC;
1302 KPRINTF(20, ("Shutting down UHCI done.\n"));
1303 break;
1307 hc = (struct PCIController *) hc->hc_Node.ln_Succ;