/* Copyright © 2010-2011, The AROS Development Team. All rights reserved */

#include <proto/exec.h>
#include <devices/usb_hub.h>

#undef HiddPCIDeviceAttrBase
#define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
#define HiddAttrBase (hd->hd_HiddAB)
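
/*
 * UhciResetHandler is installed as a system reset callback (see uhciInit
 * below): it stops the host controller and disables all of its interrupts
 * so the hardware is quiet before the machine reboots.
 */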
static AROS_INTH1(UhciResetHandler, struct PCIController *, hc)
    // stop controller and disable all interrupts
    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
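
/*
 * uhciFreeQContext: unlink a finished queue head from the hardware
 * schedule (fixing up the predecessor/successor links) and walk its
 * transfer descriptor chain so the TDs and the QH can go back to the
 * free pools.
 */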
void uhciFreeQContext(struct PCIController *hc, struct UhciQH *uqh) {
    struct UhciTD *utd = NULL;
    struct UhciTD *nextutd;

    KPRINTF(5, ("Unlinking QContext %08lx\n", uqh));
    // unlink from schedule
    uqh->uqh_Pred->uxx_Link = uqh->uqh_Succ->uxx_Self;
    uqh->uqh_Succ->uxx_Pred = uqh->uqh_Pred;
    uqh->uqh_Pred->uxx_Succ = uqh->uqh_Succ;

    nextutd = uqh->uqh_FirstTD;
        KPRINTF(1, ("FreeTD %08lx\n", nextutd));
        nextutd = (struct UhciTD *) utd->utd_Succ;
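
/*
 * uhciUpdateIntTree: re-link the nine interrupt queue head levels so the
 * controller skips levels with nothing queued; each level's hardware link
 * is pointed at the last queue head that actually carries work, falling
 * through to the control queue head.
 */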
void uhciUpdateIntTree(struct PCIController *hc) {
    struct UhciXX *preduxx;
    struct UhciXX *lastuseduxx;

    // optimize linkage between queue heads
    preduxx = lastuseduxx = (struct UhciXX *) hc->hc_UhciCtrlQH; //hc->hc_UhciIsoTD;
    for(cnt = 0; cnt < 9; cnt++)
        uxx = (struct UhciXX *) hc->hc_UhciIntQH[cnt];
        if(uxx->uxx_Succ != preduxx)
            lastuseduxx = uxx->uxx_Succ;
        uxx->uxx_Link = lastuseduxx->uxx_Self;
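
/*
 * uhciCheckPortStatusChange: poll the two root hub ports, record connect,
 * enable and resume changes in hc_PortChangeMap and in the unit's root
 * port change mask, and write the status register back to acknowledge the
 * write-to-clear change bits.
 */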
void uhciCheckPortStatusChange(struct PCIController *hc) {
    struct PCIUnit *unit = hc->hc_Unit;

    // check for port status change for UHCI and frame rollovers
    for(hciport = 0; hciport < 2; hciport++) {
        UWORD idx = hc->hc_PortNumGlobal[hciport];
        portreg = hciport ? UHCI_PORT2STSCTRL : UHCI_PORT1STSCTRL;
        oldval = READIO16_LE(hc->hc_RegBase, portreg);
        if(oldval & UHPF_ENABLECHANGE) {
            KPRINTF(200, ("Port %ld Enable changed\n", hciport));
            hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
        if(oldval & UHPF_CONNECTCHANGE) {
            KPRINTF(200, ("Port %ld Connect changed\n", hciport));
            hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
            if(!(oldval & UHPF_PORTCONNECTED)) {
                KPRINTF(200, ("Device removed on port %ld \n", hciport));
        if(oldval & UHPF_RESUMEDTX) {
            KPRINTF(200, ("Port %ld Resume changed\n", hciport));
            hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
            oldval &= ~UHPF_RESUMEDTX;
        if(hc->hc_PortChangeMap[hciport]) {
            unit->hu_RootPortChanges |= 1UL<<(idx+1);
            /*KPRINTF(10, ("Port %ld (%ld) contributes %04lx to portmap %04lx\n", idx, hciport, hc->hc_PortChangeMap[hciport], unit->hu_RootPortChanges));*/
        WRITEIO16_LE(hc->hc_RegBase, portreg, oldval);
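
/*
 * uhciHandleFinishedTDs: walk the in-flight requests on hc_TDQueue, decide
 * for each queue head whether its TD chain has completed or failed,
 * translate UHCI status bits into io_Error codes, accumulate the
 * transferred length, update the per-endpoint data toggle, and either
 * reply the request or requeue it if it was fragmented. devadrep indexes
 * the per-endpoint arrays: (DevAddr<<5) + Endpoint, plus 0x10 for IN
 * endpoints.
 */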
void uhciHandleFinishedTDs(struct PCIController *hc) {
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct IOUsbHWReq *nextioreq;
    struct UhciTD *nextutd;
    ULONG nextctrlstatus = 0;
    BOOL updatetree = FALSE;
    BOOL fixsetupterm = FALSE;

    KPRINTF(1, ("Checking for work done...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
    while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
        uqh = (struct UhciQH *) ioreq->iouh_DriverPrivate1;
        KPRINTF(1, ("Examining IOReq=%08lx with UQH=%08lx\n", ioreq, uqh));
        linkelem = READMEM32_LE(&uqh->uqh_Element);
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        if(linkelem & UHCI_TERMINATE)
            KPRINTF(1, ("UQH terminated %08lx\n", linkelem));
            utd = (struct UhciTD *) (IPTR) ((linkelem & UHCI_PTRMASK) - hc->hc_PCIVirtualAdjust - 16); // struct UhciTD starts 16 bytes before physical TD
            ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
            nextutd = (struct UhciTD *)utd->utd_Succ;
            if(!(ctrlstatus & UTCF_ACTIVE) && nextutd)
                /* OK, it's not active. Does it look like it's done? Code copied from below.
                   If not done, check the next TD too. */
                if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
                    token = READMEM32_LE(&utd->utd_Token);
                    len = (ctrlstatus & UTSM_ACTUALLENGTH) >> UTSS_ACTUALLENGTH;
                    if((len != (token & UTTM_TRANSLENGTH) >> UTTS_TRANSLENGTH))
                nextctrlstatus = READMEM32_LE(&nextutd->utd_CtrlStatus);
        /* Now, did the element link pointer change while we fetched the status for the pointed at TD?
           If so, disregard the gathered information and assume still active. */
        if(READMEM32_LE(&uqh->uqh_Element) != linkelem)
            /* Oh well, probably still active */
            KPRINTF(1, ("Link Element changed, still active.\n"));
        else if(!(ctrlstatus & UTCF_ACTIVE) && (nextutd == 0 || !(nextctrlstatus & UTCF_ACTIVE)))
            KPRINTF(1, ("CtrlStatus inactive %08lx\n", ctrlstatus));
        else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
            ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
            fixsetupterm = FALSE;
            if(inspect < 2) // if all went okay, don't traverse list, assume all bytes successfully transferred
                utd = uqh->uqh_FirstTD;
                    ctrlstatus = READMEM32_LE(&utd->utd_CtrlStatus);
                    if(ctrlstatus & UTCF_ACTIVE)
                        KPRINTF(20, ("Internal error! Still active?!\n"));
                        if(ctrlstatus & UTSF_BABBLE)
                            KPRINTF(200, ("HOST CONTROLLER IS DEAD!!!\n"));
                            ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
                            WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
                    token = READMEM32_LE(&utd->utd_Token);
                    KPRINTF(1, ("TD=%08lx CS=%08lx Token=%08lx\n", utd, ctrlstatus, token));
                    if(ctrlstatus & (UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR))
                        if(ctrlstatus & UTSF_BABBLE)
                            KPRINTF(20, ("Babble error %08lx/%08lx\n", ctrlstatus, token));
                            ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
                            // VIA chipset seems to die on babble!?!
                            KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_USBCMD)));
                            WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);
                            //ctrlstatus &= ~(UTSF_BABBLE|UTSF_STALLED|UTSF_CRCTIMEOUT|UTSF_DATABUFFERERR|UTSF_BITSTUFFERR|UTSF_NAK);
                            ctrlstatus |= UTCF_ACTIVE;
                            WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
                        else if(ctrlstatus & UTSF_CRCTIMEOUT)
                            KPRINTF(20, ("CRC/Timeout error IOReq=%08lx DIR=%ld\n", ioreq, ioreq->iouh_Dir));
                            if(ctrlstatus & UTSF_STALLED)
                                ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
                                ioreq->iouh_Req.io_Error = (ioreq->iouh_Dir == UHDIR_IN) ? UHIOERR_CRCERROR : UHIOERR_TIMEOUT;
                        else if(ctrlstatus & UTSF_STALLED)
                            KPRINTF(20, ("STALLED!\n"));
                            ioreq->iouh_Req.io_Error = UHIOERR_STALL;
                        else if(ctrlstatus & UTSF_BITSTUFFERR)
                            KPRINTF(20, ("Bitstuff error\n"));
                            ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
                        else if(ctrlstatus & UTSF_DATABUFFERERR)
                            KPRINTF(20, ("Databuffer error\n"));
                            ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
                    if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]) && (ctrlstatus & UTSF_NAK))
                        ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
                    len = (ctrlstatus & UTSM_ACTUALLENGTH)>>UTSS_ACTUALLENGTH;
                    if((len != (token & UTTM_TRANSLENGTH)>>UTTS_TRANSLENGTH))
                    len = (len+1) & 0x7ff; // get real length
                    if((token & UTTM_PID)>>UTTS_PID != PID_SETUP) // don't count setup packet
                        // due to the VIA babble bug workaround, actually more bytes can
                        // be received than requested, limit the actual value to the upper limit
                        if(actual > uqh->uqh_Actual)
                            actual = uqh->uqh_Actual;
                } while((utd = (struct UhciTD *) utd->utd_Succ));
        // bail out from babble
        ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
        if((actual < uqh->uqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
            KPRINTF(10, ("Short packet: %ld < %ld\n", actual, ioreq->iouh_Length));
            ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
        ioreq->iouh_Actual += actual;
            KPRINTF(10, ("all %ld bytes transferred\n", uqh->uqh_Actual));
            ioreq->iouh_Actual += uqh->uqh_Actual;
        // due to the short packet, the terminating packet of a setup transfer has not been sent yet; do so now.
        if(shortpkt && (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER))
            // this is actually no short packet but result of the VIA babble fix
            if(shortpkt && (ioreq->iouh_Actual == ioreq->iouh_Length))
        unit->hu_DevBusyReq[devadrep] = NULL;
        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        uhciFreeQContext(hc, uqh);
        if(ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
        if(inspect < 2) // otherwise, toggle will be right already
            // use next data toggle bit based on last successful transaction
            unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;
        if((!shortpkt && (ioreq->iouh_Actual < ioreq->iouh_Length)) || fixsetupterm)
            // fragmented, do some more work
            switch(ioreq->iouh_Req.io_Command)
                case UHCMD_CONTROLXFER:
                    KPRINTF(10, ("Rescheduling CtrlTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
                    AddHead(&hc->hc_CtrlXFerQueue, (struct Node *) ioreq);
                    KPRINTF(10, ("Rescheduling IntTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
                    AddHead(&hc->hc_IntXFerQueue, (struct Node *) ioreq);
                    KPRINTF(10, ("Rescheduling BulkTransfer at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
                    AddHead(&hc->hc_BulkXFerQueue, (struct Node *) ioreq);
                    KPRINTF(10, ("Uhm, internal error, dunno where to queue this req\n"));
                    ReplyMsg(&ioreq->iouh_Req.io_Message);
        // check for successful clear feature and set address ctrl transfers
        if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
            uhwCheckSpecialCtrlTransfers(hc, ioreq);
        ReplyMsg(&ioreq->iouh_Req.io_Message);
            // be sure to save the data toggle bit where the error occurred
            unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
            ReplyMsg(&ioreq->iouh_Req.io_Message);
            KPRINTF(20, ("IOReq=%08lx has no UQH!\n", ioreq));

    KPRINTF(10, ("Updating Tree\n"));
    uhciUpdateIntTree(hc);
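
/*
 * uhciScheduleCtrlTDs: for every request waiting on hc_CtrlXFerQueue,
 * build a control transfer chain: a SETUP TD carrying the 8-byte setup
 * packet (DATA0), a batch of DATA stage TDs with alternating data toggles
 * (at most UHCI_TD_CTRL_LIMIT bytes per batch), and a terminating STATUS
 * TD in the opposite direction. The queue head is then inserted right
 * behind the controller's control queue head.
 */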
void uhciScheduleCtrlTDs(struct PCIController *hc) {
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct UhciTD *setuputd;
    struct UhciTD *datautd;
    struct UhciTD *termutd;
    struct UhciTD *predutd;

    /* *** CTRL Transfers *** */
    KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
        KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;

        uqh = uhciAllocQH(hc);
        setuputd = uhciAllocTD(hc);
        termutd = uhciAllocTD(hc);
            uhciFreeTD(hc, setuputd);
        uqh->uqh_IOReq = ioreq;

        //termutd->utd_QueueHead = setuputd->utd_QueueHead = uqh;
        KPRINTF(1, ("SetupTD=%08lx, TermTD=%08lx\n", setuputd, termutd));

        ctrlstatus = UTCF_ACTIVE|UTCF_3ERRORSLIMIT;
        if(ioreq->iouh_Flags & UHFF_LOWSPEED)
            KPRINTF(5, ("*** LOW SPEED ***\n"));
            ctrlstatus |= UTCF_LOWSPEED;
        token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
        //setuputd->utd_Pred = NULL;
        if(ioreq->iouh_Actual)
            // this is a continuation of a fragmented ctrl transfer!
            KPRINTF(1, ("Continuing FRAGMENT at %ld of %ld\n", ioreq->iouh_Actual, ioreq->iouh_Length));
            uqh->uqh_FirstTD = setuputd;
            uqh->uqh_Element = setuputd->utd_Self; // start of queue
            WRITEMEM32_LE(&setuputd->utd_CtrlStatus, ctrlstatus);
            WRITEMEM32_LE(&setuputd->utd_Token, (PID_SETUP<<UTTS_PID)|token|(7<<UTTS_TRANSLENGTH)|UTTF_DATA0);
            WRITEMEM32_LE(&setuputd->utd_BufferPtr, (IPTR) pciGetPhysical(hc, &ioreq->iouh_SetupData));
        token |= (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? PID_IN : PID_OUT;

        actual = ioreq->iouh_Actual;
        if(ioreq->iouh_Length - actual)
            ctrlstatus |= UTCF_SHORTPACKET;
                phyaddr = (IPTR) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
                if(!unit->hu_DevDataToggle[devadrep])
                    // continue with data toggle 0
                phyaddr = (IPTR) pciGetPhysical(hc, ioreq->iouh_Data);

                datautd = uhciAllocTD(hc);
                token ^= UTTF_DATA1; // toggle bit
                predutd->utd_Link = datautd->utd_Self;
                predutd->utd_Succ = (struct UhciXX *) datautd;
                //datautd->utd_Pred = (struct UhciXX *) predutd;
                //datautd->utd_QueueHead = uqh;
                len = ioreq->iouh_Length - actual;
                if(len > ioreq->iouh_MaxPktSize)
                    len = ioreq->iouh_MaxPktSize;
                WRITEMEM32_LE(&datautd->utd_CtrlStatus, ctrlstatus);

                /* FIXME: This workaround for a VIA babble bug will potentially overwrite innocent memory (very rarely), but will avoid the host controller dropping dead completely. */
                if((len < ioreq->iouh_MaxPktSize) && (ioreq->iouh_SetupData.bmRequestType & URTF_IN))
                    WRITEMEM32_LE(&datautd->utd_Token, token|((ioreq->iouh_MaxPktSize-1)<<UTTS_TRANSLENGTH)); // no masking needed here as len is always >= 1
                WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking needed here as len is always >= 1
                WRITEMEM32_LE(&datautd->utd_Token, token|((len-1)<<UTTS_TRANSLENGTH)); // no masking needed here as len is always >= 1
                WRITEMEM32_LE(&datautd->utd_BufferPtr, phyaddr);
            } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_CTRL_LIMIT));
            if(actual == ioreq->iouh_Actual)
                // not at least one data TD? try again later
                uhciFreeTD(hc, setuputd);
                uhciFreeTD(hc, termutd);

                KPRINTF(1, ("Freeing setup\n"));
                uqh->uqh_FirstTD = (struct UhciTD *) setuputd->utd_Succ;
                //uqh->uqh_FirstTD->utd_Pred = NULL;
                uqh->uqh_Element = setuputd->utd_Succ->uxx_Self; // start of queue after setup packet
                uhciFreeTD(hc, setuputd);
                // set toggle for next batch
                unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? FALSE : TRUE;

            // free Setup packet, assign termination as first packet (no data)
            KPRINTF(1, ("Freeing setup (term only)\n"));
            uqh->uqh_FirstTD = (struct UhciTD *) termutd;
            uqh->uqh_Element = termutd->utd_Self; // start of queue after setup packet
            uhciFreeTD(hc, setuputd);

        uqh->uqh_Actual = actual - ioreq->iouh_Actual;
        ctrlstatus |= UTCF_READYINTEN;
        if(actual == ioreq->iouh_Length)
            KPRINTF(1, ("Activating TERM\n"));
            token ^= (PID_IN^PID_OUT)<<UTTS_PID;

            predutd->utd_Link = termutd->utd_Self;
            predutd->utd_Succ = (struct UhciXX *) termutd;
            //termutd->utd_Pred = (struct UhciXX *) predutd;
            WRITEMEM32_LE(&termutd->utd_CtrlStatus, ctrlstatus);
            WRITEMEM32_LE(&termutd->utd_Token, token|(0x7ff<<UTTS_TRANSLENGTH));
            CONSTWRITEMEM32_LE(&termutd->utd_Link, UHCI_TERMINATE);
            termutd->utd_Succ = NULL;
            //uqh->uqh_LastTD = termutd;

            KPRINTF(1, ("Setup data phase fragmented\n"));
            // don't create TERM, we don't know the final data toggle bit
            // but mark the last data TD for interrupt generation
            WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
            uhciFreeTD(hc, termutd);
            CONSTWRITEMEM32_LE(&predutd->utd_Link, UHCI_TERMINATE);
            predutd->utd_Succ = NULL;
            //uqh->uqh_LastTD = predutd;

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = uqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;

        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry (just behind the CtrlQH)
        uqh->uqh_Succ = hc->hc_UhciCtrlQH->uqh_Succ;
        uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
        uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciCtrlQH;
        uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
        hc->hc_UhciCtrlQH->uqh_Succ = (struct UhciXX *) uqh;
        hc->hc_UhciCtrlQH->uqh_Link = uqh->uqh_Self;

        ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
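
/*
 * uhciScheduleIntTDs: build TD chains for pending interrupt transfers and
 * hook each queue head into one of the nine interrupt QH levels; the level
 * is chosen from iouh_Interval (roughly log2 of the interval, capped at
 * level 8 for intervals of 255 and above).
 */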
void uhciScheduleIntTDs(struct PCIController *hc) {
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct UhciQH *intuqh;
    struct UhciTD *predutd;

    /* *** INT Transfers *** */
    KPRINTF(1, ("Scheduling new INT transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep]) {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;

        uqh = uhciAllocQH(hc);
        uqh->uqh_IOReq = ioreq;

        ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT|UTCF_SHORTPACKET;
        if(ioreq->iouh_Flags & UHFF_LOWSPEED) {
            KPRINTF(5, ("*** LOW SPEED ***\n"));
            ctrlstatus |= UTCF_LOWSPEED;

        token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
        token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;

        actual = ioreq->iouh_Actual;
        phyaddr = (IPTR) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
        if(unit->hu_DevDataToggle[devadrep]) {
            // continue with data toggle 1
            KPRINTF(1, ("Data1\n"));
            KPRINTF(1, ("Data0\n"));

            utd = uhciAllocTD(hc);
                WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(&utd->utd_Self)|UHCI_DFS);
                predutd->utd_Succ = (struct UhciXX *) utd;
                //utd->utd_Pred = (struct UhciXX *) predutd;
                uqh->uqh_FirstTD = utd;
                uqh->uqh_Element = utd->utd_Self;
                //utd->utd_Pred = NULL;
            //utd->utd_QueueHead = uqh;
            len = ioreq->iouh_Length - actual;
            if(len > ioreq->iouh_MaxPktSize) {
                len = ioreq->iouh_MaxPktSize;
            WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
            WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
            WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
            token ^= UTTF_DATA1; // toggle bit
        } while((actual < ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_INT_LIMIT));

            // not at least one data TD? try again later

        uqh->uqh_Actual = actual - ioreq->iouh_Actual;
        // set toggle for next batch / successful transfer
        unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;
        if(unit->hu_DevDataToggle[devadrep]) {
            // continue with data toggle 1
            KPRINTF(1, ("NewData1\n"));
            KPRINTF(1, ("NewData0\n"));

        ctrlstatus |= UTCF_READYINTEN;
        WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
        CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
        utd->utd_Succ = NULL;
        //uqh->uqh_LastTD = utd;

        if(ioreq->iouh_Interval >= 255) {
            intuqh = hc->hc_UhciIntQH[8]; // 256ms interval
                intuqh = hc->hc_UhciIntQH[cnt++];
            } while(ioreq->iouh_Interval >= (1<<cnt));
            KPRINTF(1, ("Scheduled at level %ld\n", cnt));

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = uqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;

        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry (just behind the right IntQH)
        uqh->uqh_Succ = intuqh->uqh_Succ;
        uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
        uqh->uqh_Pred = (struct UhciXX *) intuqh;
        uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
        intuqh->uqh_Succ = (struct UhciXX *) uqh;
        intuqh->uqh_Link = uqh->uqh_Self;

        uhciUpdateIntTree(hc);

        ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
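
/*
 * uhciScheduleBulkTDs: build TD chains for pending bulk transfers, at most
 * UHCI_TD_BULK_LIMIT bytes per batch, optionally appending a zero-length
 * packet when the transfer ends exactly on a packet boundary and
 * UHFF_NOSHORTPKT is not set; the queue head is inserted right behind the
 * controller's bulk queue head.
 */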
void uhciScheduleBulkTDs(struct PCIController *hc) {
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct UhciTD *predutd;

    /* *** BULK Transfers *** */
    KPRINTF(1, ("Scheduling new BULK transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;

        uqh = uhciAllocQH(hc);
        uqh->uqh_IOReq = ioreq;

        ctrlstatus = UTCF_ACTIVE|UTCF_1ERRORLIMIT|UTCF_SHORTPACKET;
        token = (ioreq->iouh_DevAddr<<UTTS_DEVADDR)|(ioreq->iouh_Endpoint<<UTTS_ENDPOINT);
        token |= (ioreq->iouh_Dir == UHDIR_IN) ? PID_IN : PID_OUT;

        actual = ioreq->iouh_Actual;
        phyaddr = (IPTR) pciGetPhysical(hc, &(((UBYTE *) ioreq->iouh_Data)[ioreq->iouh_Actual]));
        if(unit->hu_DevDataToggle[devadrep])
            // continue with data toggle 1

            utd = uhciAllocTD(hc);
                WRITEMEM32_LE(&predutd->utd_Link, READMEM32_LE(&utd->utd_Self)|UHCI_DFS);
                predutd->utd_Succ = (struct UhciXX *) utd;
                //utd->utd_Pred = (struct UhciXX *) predutd;
                uqh->uqh_FirstTD = utd;
                uqh->uqh_Element = utd->utd_Self;
                //utd->utd_Pred = NULL;
            //utd->utd_QueueHead = uqh;
            len = ioreq->iouh_Length - actual;
            if(len > ioreq->iouh_MaxPktSize)
                len = ioreq->iouh_MaxPktSize;
            WRITEMEM32_LE(&utd->utd_CtrlStatus, ctrlstatus);
            WRITEMEM32_LE(&utd->utd_Token, token|(((len-1) & 0x7ff)<<UTTS_TRANSLENGTH));
            WRITEMEM32_LE(&utd->utd_BufferPtr, phyaddr);
            token ^= UTTF_DATA1; // toggle bit
            if((actual == ioreq->iouh_Length) && len)
                if((ioreq->iouh_Flags & UHFF_NOSHORTPKT) || (ioreq->iouh_Dir == UHDIR_IN) || (actual % ioreq->iouh_MaxPktSize))
                    // no last zero byte packet
                    // avoid rare case that the zero byte packet is reached on TD_BULK_LIMIT
        } while(forcezero || (len && (actual <= ioreq->iouh_Length) && (actual - ioreq->iouh_Actual < UHCI_TD_BULK_LIMIT)));

            // not at least one data TD? try again later

        uqh->uqh_Actual = actual - ioreq->iouh_Actual;
        // set toggle for next batch / successful transfer
        unit->hu_DevDataToggle[devadrep] = (token & UTTF_DATA1) ? TRUE : FALSE;

        ctrlstatus |= UTCF_READYINTEN;
        WRITEMEM32_LE(&predutd->utd_CtrlStatus, ctrlstatus);
        CONSTWRITEMEM32_LE(&utd->utd_Link, UHCI_TERMINATE);
        utd->utd_Succ = NULL;
        //uqh->uqh_LastTD = utd;

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = uqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + ioreq->iouh_NakTimeout : 0;

        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry (just behind the BulkQH)
        uqh->uqh_Succ = hc->hc_UhciBulkQH->uqh_Succ;
        uqh->uqh_Link = uqh->uqh_Succ->uxx_Self;
        uqh->uqh_Pred = (struct UhciXX *) hc->hc_UhciBulkQH;
        uqh->uqh_Succ->uxx_Pred = (struct UhciXX *) uqh;
        hc->hc_UhciBulkQH->uqh_Succ = (struct UhciXX *) uqh;
        hc->hc_UhciBulkQH->uqh_Link = uqh->uqh_Self;

        ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
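
/*
 * uhciUpdateFrameCounter: the UHCI frame number register is only 11 bits
 * wide, so the driver keeps a wider software counter in hc_FrameCounter
 * and detects a rollover whenever the hardware value is smaller than the
 * low 11 bits of the previous reading.
 */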
void uhciUpdateFrameCounter(struct PCIController *hc) {
    framecnt = READIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT) & 0x07ff;
    if(framecnt < (hc->hc_FrameCounter & 0x07ff))
        hc->hc_FrameCounter |= 0x07ff;
        hc->hc_FrameCounter++;
        hc->hc_FrameCounter |= framecnt;
        KPRINTF(10, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
        hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xfffff800)|framecnt;
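
/*
 * uhciCompleteInt: software interrupt doing the actual completion work.
 * It updates the frame counter, checks the root hub ports, retires
 * finished TDs and then schedules any queued control, interrupt and bulk
 * transfers.
 */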
static AROS_INTH1(uhciCompleteInt, struct PCIController *, hc) {
    KPRINTF(100, ("CompleteInt!\n"));
    uhciUpdateFrameCounter(hc);

    /* **************** PROCESS DONE TRANSFERS **************** */
    uhciCheckPortStatusChange(hc);
    uhwCheckRootHubChanges(hc->hc_Unit);

    uhciHandleFinishedTDs(hc);

    if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ) {
        uhciScheduleCtrlTDs(hc);
    if(hc->hc_IntXFerQueue.lh_Head->ln_Succ) {
        uhciScheduleIntTDs(hc);
    if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ) {
        uhciScheduleBulkTDs(hc);

    KPRINTF(1, ("CompleteDone\n"));
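
/*
 * uhciIntCode: hardware interrupt handler. It reads USBSTATUS, writes the
 * bits back to acknowledge them, resets the controller on fatal errors
 * (system/process error or halt) and fires the CompleteInt software
 * interrupt via SureCause() for normal completion and error interrupts.
 */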
static AROS_INTH1(uhciIntCode, struct PCIController *, hc)
    struct PCIDevice *base = hc->hc_Device;

    intr = READIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS);
    if(intr & (UHSF_USBINT|UHSF_USBERRORINT|UHSF_RESUMEDTX|UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED)) {
        WRITEIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS, intr);
        if(intr & (UHSF_HCSYSERROR|UHSF_HCPROCERROR|UHSF_HCHALTED)) {
            KPRINTF(200, ("Host ERROR!\n"));
            WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET|UHCF_GLOBALRESET|UHCF_MAXPACKET64|UHCF_CONFIGURE);
        if (!(hc->hc_Flags & HCF_ONLINE)) {
        if(intr & (UHSF_USBINT|UHSF_USBERRORINT)) {
            SureCause(base, &hc->hc_CompleteInt);
            // uhciCompleteInt(hc);
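
/*
 * uhciInit: bring one UHCI controller up. It allocates a single block of
 * PCI DMA memory holding the aligned frame list plus the QH and TD pools,
 * records hc_PCIVirtualAdjust so each descriptor's 32-bit self pointer is
 * a physical address, builds the static schedule skeleton (terminating QH,
 * bulk QH, control QH, ISO TD and nine interrupt QH levels), fills the
 * frame list with the matching interrupt entry points, then resets and
 * configures the hardware, installs the reset callback and the PCI
 * interrupt server, and finally starts the controller.
 */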
BOOL uhciInit(struct PCIController *hc, struct PCIUnit *hu) {
    struct PCIDevice *hd = hu->hu_Device;
    struct UhciQH *preduqh;

    struct TagItem pciActivateIO[] = {
        { aHidd_PCIDevice_isIO, TRUE },
    struct TagItem pciActivateBusmaster[] = {
        { aHidd_PCIDevice_isMaster, TRUE },
    struct TagItem pciDeactivateBusmaster[] = {
        { aHidd_PCIDevice_isMaster, FALSE },

    hc->hc_NumPorts = 2; // UHCI always uses 2 ports per controller
    KPRINTF(20, ("Found UHCI Controller %08lx FuncNum=%ld with %ld ports\n", hc->hc_PCIDeviceObject, hc->hc_FunctionNum, hc->hc_NumPorts));

    hc->hc_CompleteInt.is_Node.ln_Type = NT_INTERRUPT;
    hc->hc_CompleteInt.is_Node.ln_Name = "UHCI CompleteInt";
    hc->hc_CompleteInt.is_Node.ln_Pri = 0;
    hc->hc_CompleteInt.is_Data = hc;
    hc->hc_CompleteInt.is_Code = (VOID_FUNC)uhciCompleteInt;

    hc->hc_PCIMemSize = sizeof(ULONG) * UHCI_FRAMELIST_SIZE + UHCI_FRAMELIST_ALIGNMENT + 1;
    hc->hc_PCIMemSize += sizeof(struct UhciQH) * UHCI_QH_POOLSIZE;
    hc->hc_PCIMemSize += sizeof(struct UhciTD) * UHCI_TD_POOLSIZE;

    memptr = HIDD_PCIDriver_AllocPCIMem(hc->hc_PCIDriverObject, hc->hc_PCIMemSize);
    hc->hc_PCIMem = (APTR) memptr;

    // PhysicalAddress - VirtualAdjust = VirtualAddress
    // VirtualAddress + VirtualAdjust = PhysicalAddress
    hc->hc_PCIVirtualAdjust = ((IPTR) pciGetPhysical(hc, memptr)) - ((IPTR) memptr);
    KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc->hc_PCIVirtualAdjust));

    memptr = (UBYTE *) ((((IPTR) hc->hc_PCIMem) + UHCI_FRAMELIST_ALIGNMENT) & (~UHCI_FRAMELIST_ALIGNMENT));
    hc->hc_UhciFrameList = (ULONG *) memptr;
    KPRINTF(10, ("FrameListBase 0x%08lx\n", hc->hc_UhciFrameList));
    memptr += sizeof(APTR) * UHCI_FRAMELIST_SIZE;

    uqh = (struct UhciQH *) memptr;
    hc->hc_UhciQHPool = uqh;
    cnt = UHCI_QH_POOLSIZE - 1;
        // minimal initialization
        uqh->uqh_Succ = (struct UhciXX *) (uqh + 1);
        WRITEMEM32_LE(&uqh->uqh_Self, (IPTR) (&uqh->uqh_Link) + hc->hc_PCIVirtualAdjust + UHCI_QHSELECT);
    uqh->uqh_Succ = NULL;
    WRITEMEM32_LE(&uqh->uqh_Self, (IPTR) (&uqh->uqh_Link) + hc->hc_PCIVirtualAdjust + UHCI_QHSELECT);
    memptr += sizeof(struct UhciQH) * UHCI_QH_POOLSIZE;

    utd = (struct UhciTD *) memptr;
    hc->hc_UhciTDPool = utd;
    cnt = UHCI_TD_POOLSIZE - 1;
        utd->utd_Succ = (struct UhciXX *) (utd + 1);
        WRITEMEM32_LE(&utd->utd_Self, (IPTR) (&utd->utd_Link) + hc->hc_PCIVirtualAdjust + UHCI_TDSELECT);
    utd->utd_Succ = NULL;
    WRITEMEM32_LE(&utd->utd_Self, (IPTR) (&utd->utd_Link) + hc->hc_PCIVirtualAdjust + UHCI_TDSELECT);
    memptr += sizeof(struct UhciTD) * UHCI_TD_POOLSIZE;

    hc->hc_UhciTermQH = preduqh = uqh = uhciAllocQH(hc);
    uqh->uqh_Succ = NULL;
    CONSTWRITEMEM32_LE(&uqh->uqh_Link, UHCI_TERMINATE);
    CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);

    hc->hc_UhciBulkQH = uqh = uhciAllocQH(hc);
    uqh->uqh_Succ = (struct UhciXX *) preduqh;
    preduqh->uqh_Pred = (struct UhciXX *) uqh;
    uqh->uqh_Link = preduqh->uqh_Self; // link to terminating QH
    CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);

    hc->hc_UhciCtrlQH = uqh = uhciAllocQH(hc);
    uqh->uqh_Succ = (struct UhciXX *) preduqh;
    preduqh->uqh_Pred = (struct UhciXX *) uqh;
    uqh->uqh_Link = preduqh->uqh_Self; // link to Bulk QH
    CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);
    hc->hc_UhciIsoTD = utd = uhciAllocTD(hc);
    utd->utd_Succ = (struct UhciXX *) uqh;
    //utd->utd_Pred = NULL; // no certain linkage above this level
    uqh->uqh_Pred = (struct UhciXX *) utd;
    utd->utd_Link = uqh->uqh_Self; // link to Ctrl QH

    CONSTWRITEMEM32_LE(&utd->utd_CtrlStatus, 0);

    hc->hc_UhciIntQH[0] = uqh = uhciAllocQH(hc);
    uqh->uqh_Succ = (struct UhciXX *) utd;
    uqh->uqh_Pred = NULL; // who knows...
    //uqh->uqh_Link = utd->utd_Self; // link to ISO
    CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);

    // make 9 levels of QH interrupts
    for(cnt = 1; cnt < 9; cnt++) {
        hc->hc_UhciIntQH[cnt] = uqh = uhciAllocQH(hc);
        uqh->uqh_Succ = (struct UhciXX *) preduqh;
        uqh->uqh_Pred = NULL; // who knows...
        //uqh->uqh_Link = preduqh->uqh_Self; // link to previous int level
        CONSTWRITEMEM32_LE(&uqh->uqh_Element, UHCI_TERMINATE);

    uhciUpdateIntTree(hc);

    // fill in framelist with IntQH entry points based on interval
    tabptr = hc->hc_UhciFrameList;
    for(cnt = 0; cnt < UHCI_FRAMELIST_SIZE; cnt++) {
        uqh = hc->hc_UhciIntQH[8];
            if(cnt & (1UL<<bitcnt)) {
                uqh = hc->hc_UhciIntQH[bitcnt];
        } while(++bitcnt < 9);
        *tabptr++ = uqh->uqh_Self;

    // this will cause more PCI memory access, but faster USB transfers as well
    //WRITEMEM32_LE(&hc->hc_UhciTermQH->uqh_Link, AROS_LONG2LE(hc->hc_UhciBulkQH->uqh_Self));

    // time to initialize hardware...
    OOP_GetAttr(hc->hc_PCIDeviceObject, aHidd_PCIDevice_Base4, (IPTR *) &hc->hc_RegBase);
    hc->hc_RegBase = (APTR) (((IPTR) hc->hc_RegBase) & (~0xf));
    KPRINTF(10, ("RegBase = 0x%08lx\n", hc->hc_RegBase));
    OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateIO);

    // disable BIOS legacy support
    KPRINTF(10, ("Turning off BIOS legacy support (old value=%04lx)\n", HIDD_PCIDevice_ReadConfigWord(hc->hc_PCIDeviceObject, UHCI_USBLEGSUP)));
    HIDD_PCIDevice_WriteConfigWord(hc->hc_PCIDeviceObject, UHCI_USBLEGSUP, 0x8f00);

    KPRINTF(10, ("Resetting UHCI HC\n"));
    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_GLOBALRESET);
    OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciDeactivateBusmaster); // no busmaster yet
    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
        if(!(READIO16_LE(hc->hc_RegBase, UHCI_USBCMD) & UHCF_HCRESET)) {
        KPRINTF(20, ("Reset Timeout!\n"));
        WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
    KPRINTF(20, ("Reset finished after %ld ticks\n", 100-cnt));
    // stop controller and disable all interrupts first
    KPRINTF(10, ("Stopping controller and enabling busmaster\n"));
    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);
    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);

    OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateBusmaster); // enable busmaster

    // Fix for VIA Babble problem
    cnt = HIDD_PCIDevice_ReadConfigByte(hc->hc_PCIDeviceObject, 0x40);
        KPRINTF(20, ("Applying VIA Babble workaround\n"));
        HIDD_PCIDevice_WriteConfigByte(hc->hc_PCIDeviceObject, 0x40, cnt|0x40);

    KPRINTF(10, ("Configuring UHCI HC\n"));
    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE);

    WRITEIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT, 0);

    WRITEIO32_LE(hc->hc_RegBase, UHCI_FRAMELISTADDR, (IPTR) pciGetPhysical(hc, hc->hc_UhciFrameList));

    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS, UHIF_TIMEOUTCRC|UHIF_INTONCOMPLETE|UHIF_SHORTPACKET);

    // install reset handler
    hc->hc_ResetInt.is_Code = (VOID_FUNC)UhciResetHandler;
    hc->hc_ResetInt.is_Data = hc;
    AddResetCallback(&hc->hc_ResetInt);

    hc->hc_PCIIntHandler.is_Node.ln_Name = "UHCI PCI (pciuhci.device)";
    hc->hc_PCIIntHandler.is_Node.ln_Pri = 5;
    hc->hc_PCIIntHandler.is_Node.ln_Type = NT_INTERRUPT;
    hc->hc_PCIIntHandler.is_Code = (VOID_FUNC)uhciIntCode;
    hc->hc_PCIIntHandler.is_Data = hc;
    AddIntServer(INTB_KERNEL + hc->hc_PCIIntLine, &hc->hc_PCIIntHandler);

    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, UHIF_TIMEOUTCRC|UHIF_INTONCOMPLETE|UHIF_SHORTPACKET);

    // clear all port bits (both ports)
    WRITEIO32_LE(hc->hc_RegBase, UHCI_PORT1STSCTRL, 0);

    KPRINTF(10, ("Enabling PIRQ (old value=%04lx)\n", HIDD_PCIDevice_ReadConfigWord(hc->hc_PCIDeviceObject, UHCI_USBLEGSUP)));
    HIDD_PCIDevice_WriteConfigWord(hc->hc_PCIDeviceObject, UHCI_USBLEGSUP, 0x2000);

    WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE|UHCF_RUNSTOP);

    KPRINTF(20, ("HW Init done\n"));

    KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_USBCMD)));
    KPRINTF(10, ("HW Regs USBSTS=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_USBSTATUS)));
    KPRINTF(10, ("HW Regs FRAMECOUNT=%04lx\n", READIO16_LE(hc->hc_RegBase, UHCI_FRAMECOUNT)));

    KPRINTF(20, ("uhciInit returns TRUE...\n"));
    /* FIXME: What would the appropriate debug level be? */
    KPRINTF(1000, ("uhciInit returns FALSE...\n"));
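
/*
 * uhciFree: shut down every UHCI controller attached to the unit; disable
 * interrupts and legacy support, clear the port registers, stop and
 * finally reset each host controller.
 */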
void uhciFree(struct PCIController *hc, struct PCIUnit *hu) {
    hc = (struct PCIController *) hu->hu_Controllers.lh_Head;
    while(hc->hc_Node.ln_Succ) {
        KPRINTF(20, ("Shutting down UHCI %08lx\n", hc));
        WRITEIO16_LE(hc->hc_RegBase, UHCI_USBINTEN, 0);
        HIDD_PCIDevice_WriteConfigWord(hc->hc_PCIDeviceObject, UHCI_USBLEGSUP, 0);

        // disable all ports
        WRITEIO32_LE(hc->hc_RegBase, UHCI_PORT1STSCTRL, 0);

        //WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_MAXPACKET64|UHCF_CONFIGURE);
        //uhwDelayMS(50, hu);
        KPRINTF(20, ("Stopping UHCI %08lx\n", hc));
        WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);

        //KPRINTF(20, ("Reset done UHCI %08lx\n", hc));

        KPRINTF(20, ("Resetting UHCI %08lx\n", hc));
        WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, UHCF_HCRESET);
        WRITEIO16_LE(hc->hc_RegBase, UHCI_USBCMD, 0);

        KPRINTF(20, ("Shutting down UHCI done.\n"));
        hc = (struct PCIController *) hc->hc_Node.ln_Succ;