/*
    Copyright © 2010-2013, The AROS Development Team. All rights reserved.
*/

#include <proto/exec.h>
#include <devices/usb_hub.h>

#undef HiddPCIDeviceAttrBase
#define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
#define HiddAttrBase (hd->hd_HiddAB)
static AROS_INTH1(EhciResetHandler, struct PCIController *, hc)
{
    AROS_INTFUNC_INIT

    CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, EHUF_HCRESET|(1UL<<EHUS_INTTHRESHOLD));

    return FALSE;

    AROS_INTFUNC_EXIT
}
static void ehciFinishRequest(struct PCIUnit *unit, struct IOUsbHWReq *ioreq)
{
    struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;
    UWORD devadrep;
    UWORD dir;

    // unlink from schedule
    eqh->eqh_Pred->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
    CacheClearE(&eqh->eqh_Pred->eqh_NextQH, 32, CACRF_ClearD);

    eqh->eqh_Succ->eqh_Pred = eqh->eqh_Pred;
    eqh->eqh_Pred->eqh_Succ = eqh->eqh_Succ;

    /* Deactivate the endpoint */
    Remove(&ioreq->iouh_Req.io_Message.mn_Node);
    devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
    unit->hu_DevBusyReq[devadrep] = NULL;

    /* Release bounce buffers */
    if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
        dir = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT;
    else
        dir = ioreq->iouh_Dir;

    usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, ioreq->iouh_Actual, dir);
    usbReleaseBuffer(eqh->eqh_SetupBuf, &ioreq->iouh_SetupData, 8, UHDIR_IN);
    eqh->eqh_Buffer = NULL;
    eqh->eqh_SetupBuf = NULL;
}
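/*
 * Added note (not in the original source): unit->hu_DevBusyReq and
 * unit->hu_NakTimeoutFrame are indexed by the packed key built above:
 * (DevAddr<<5) + Endpoint, with 0x10 added for IN endpoints. Each device
 * address therefore owns 16 OUT slots and 16 IN slots, and the same
 * expression recurs in the scheduling and completion code below.
 */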
void ehciFreeAsyncContext(struct PCIController *hc, struct IOUsbHWReq *ioreq)
{
    struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;

    KPRINTF(5, ("Freeing AsyncContext 0x%p\n", eqh));
    ehciFinishRequest(hc->hc_Unit, ioreq);

    // need to wait until an async schedule rollover before freeing these
    eqh->eqh_Succ = hc->hc_EhciAsyncFreeQH;
    hc->hc_EhciAsyncFreeQH = eqh;

    WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd|EHUF_ASYNCDOORBELL);
}
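/*
 * Added note (not in the original source): EHUF_ASYNCDOORBELL requests an
 * "Interrupt on Async Advance" from the controller. The queue heads parked
 * on hc_EhciAsyncFreeQH are only recycled later in ehciCompleteInt(), after
 * ehciIntCode() has seen EHSF_ASYNCADVANCE and set hc_AsyncAdvanced, so the
 * hardware can no longer hold a cached pointer to them.
 */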
void ehciFreePeriodicContext(struct PCIController *hc, struct IOUsbHWReq *ioreq)
{
    struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;
    struct EhciTD *etd;
    struct EhciTD *nextetd;

    KPRINTF(5, ("Freeing PeriodicContext 0x%p\n", eqh));
    ehciFinishRequest(hc->hc_Unit, ioreq);

    Disable(); // avoid race condition with interrupt
    nextetd = eqh->eqh_FirstTD;
    while((etd = nextetd))
    {
        KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
        nextetd = etd->etd_Succ;
        ehciFreeTD(hc, etd);
    }
    ehciFreeQH(hc, eqh);
    Enable();
}
void ehciFreeQHandTDs(struct PCIController *hc, struct EhciQH *eqh)
{
    struct EhciTD *etd = NULL;
    struct EhciTD *nextetd;

    KPRINTF(5, ("Unlinking QContext 0x%p\n", eqh));
    nextetd = eqh->eqh_FirstTD;
    while(nextetd)
    {
        KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
        etd = nextetd;
        nextetd = (struct EhciTD *) etd->etd_Succ;
        ehciFreeTD(hc, etd);
    }
    ehciFreeQH(hc, eqh);
}
void ehciUpdateIntTree(struct PCIController *hc)
{
    struct EhciQH *eqh;
    struct EhciQH *predeqh;
    struct EhciQH *lastusedeqh;
    UWORD cnt;

    // optimize linkage between queue heads
    predeqh = lastusedeqh = hc->hc_EhciTermQH;
    for(cnt = 0; cnt < 11; cnt++)
    {
        eqh = hc->hc_EhciIntQH[cnt];
        if(eqh->eqh_Succ != predeqh)
        {
            lastusedeqh = eqh->eqh_Succ;
        }
        eqh->eqh_NextQH = lastusedeqh->eqh_Self;
        CacheClearE(&eqh->eqh_NextQH, 32, CACRF_ClearD);
        predeqh = eqh;
    }
}
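/*
 * Added note (not in the original source): hc_EhciIntQH[0..10] are static
 * anchor queue heads, one per power-of-two polling interval (see
 * ehciScheduleIntTDs() and the frame list setup in ehciInit()). The loop
 * above relinks every anchor's hardware next pointer to the nearest level
 * that actually carries scheduled work, so empty interrupt levels are
 * skipped and the chain always terminates at hc_EhciTermQH.
 */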
void ehciHandleFinishedTDs(struct PCIController *hc)
{
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct IOUsbHWReq *nextioreq;
    struct EhciQH *eqh;
    struct EhciTD *etd;
    struct EhciTD *predetd;
    ULONG epctrlstatus;
    ULONG ctrlstatus;
    ULONG nexttd;
    ULONG actual;
    ULONG len;
    IPTR phyaddr;
    UWORD devadrep;
    BOOL halted;
    BOOL updatetree = FALSE;
    BOOL shortpkt;
    BOOL inspect;
    BOOL zeroterm;

161 KPRINTF(1, ("Checking for Async work done...\n"));
162 ioreq
= (struct IOUsbHWReq
*) hc
->hc_TDQueue
.lh_Head
;
163 while((nextioreq
= (struct IOUsbHWReq
*) ((struct Node
*) ioreq
)->ln_Succ
))
165 eqh
= (struct EhciQH
*) ioreq
->iouh_DriverPrivate1
;
168 KPRINTF(1, ("Examining IOReq=0x%p with EQH=0x%p\n", ioreq
, eqh
));
171 CacheClearE(&eqh
->eqh_NextQH
, 32, CACRF_InvalidateD
);
172 epctrlstatus
= READMEM32_LE(&eqh
->eqh_CtrlStatus
);
173 nexttd
= READMEM32_LE(&eqh
->eqh_NextTD
);
174 devadrep
= (ioreq
->iouh_DevAddr
<<5) + ioreq
->iouh_Endpoint
+ ((ioreq
->iouh_Dir
== UHDIR_IN
) ? 0x10 : 0);
175 halted
= ((epctrlstatus
& (ETCF_ACTIVE
|ETSF_HALTED
)) == ETSF_HALTED
);
176 if(halted
|| (!(epctrlstatus
& ETCF_ACTIVE
) && (nexttd
& EHCI_TERMINATE
)))
178 KPRINTF(1, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus
, READMEM32_LE(&eqh
->eqh_CurrTD
), nexttd
));
182 etd
= eqh
->eqh_FirstTD
;
185 ctrlstatus
= READMEM32_LE(&etd
->etd_CtrlStatus
);
186 KPRINTF(1, ("AS: CS=%08lx SL=%08lx TD=0x%p\n", ctrlstatus
, READMEM32_LE(&etd
->etd_Self
), etd
));
187 if(ctrlstatus
& ETCF_ACTIVE
)
191 KPRINTF(20, ("Async: Halted before TD\n"));
192 //ctrlstatus = eqh->eqh_CtrlStatus;
194 if(unit
->hu_NakTimeoutFrame
[devadrep
] && (hc
->hc_FrameCounter
> unit
->hu_NakTimeoutFrame
[devadrep
]))
196 KPRINTF(20, ("NAK timeout\n"));
197 ioreq
->iouh_Req
.io_Error
= UHIOERR_NAKTIMEOUT
;
201 // what happened here? The host controller was just updating the fields and has not finished yet
202 ctrlstatus
= epctrlstatus
;
204 /*KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
205 KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", READMEM32_LE(&eqh->eqh_CtrlStatus), READMEM32_LE(&eqh->eqh_CurrTD), READMEM32_LE(&eqh->eqh_NextTD)));
206 KPRINTF(20, ("AS: CS=%08lx SL=%08lx TD=%08lx\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
207 etd = eqh->eqh_FirstTD;
210 KPRINTF(20, ("XX: CS=%08lx SL=%08lx TD=%08lx\n", READMEM32_LE(&etd->etd_CtrlStatus), READMEM32_LE(&etd->etd_Self), etd));
211 } while(etd = etd->etd_Succ);
212 KPRINTF(20, ("Async: Internal error! Still active?!\n"));
218 if(ctrlstatus
& (ETSF_HALTED
|ETSF_TRANSERR
|ETSF_BABBLE
|ETSF_DATABUFFERERR
))
220 if(ctrlstatus
& ETSF_BABBLE
)
222 KPRINTF(20, ("Babble error %08lx\n", ctrlstatus
));
223 ioreq
->iouh_Req
.io_Error
= UHIOERR_OVERFLOW
;
225 else if(ctrlstatus
& ETSF_DATABUFFERERR
)
227 KPRINTF(20, ("Databuffer error\n"));
228 ioreq
->iouh_Req
.io_Error
= UHIOERR_HOSTERROR
;
230 else if(ctrlstatus
& ETSF_TRANSERR
)
232 if((ctrlstatus
& ETCM_ERRORLIMIT
)>>ETCS_ERRORLIMIT
)
234 KPRINTF(20, ("other kind of STALLED!\n"));
235 ioreq
->iouh_Req
.io_Error
= UHIOERR_STALL
;
237 KPRINTF(20, ("TIMEOUT!\n"));
238 ioreq
->iouh_Req
.io_Error
= UHIOERR_TIMEOUT
;
241 KPRINTF(20, ("STALLED!\n"));
242 ioreq
->iouh_Req
.io_Error
= UHIOERR_STALL
;
248 len
= etd
->etd_Length
- ((ctrlstatus
& ETSM_TRANSLENGTH
)>>ETSS_TRANSLENGTH
);
249 if((ctrlstatus
& ETCM_PIDCODE
) != ETCF_PIDCODE_SETUP
) // don't count setup packet
253 if(ctrlstatus
& ETSM_TRANSLENGTH
)
255 KPRINTF(10, ("Short packet: %ld < %ld\n", len
, etd
->etd_Length
));
260 } while(etd
&& (!(ctrlstatus
& ETCF_READYINTEN
)));
                if(((actual + ioreq->iouh_Actual) < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
                {
                    ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
                }
                ioreq->iouh_Actual += actual;
                if(inspect && (!shortpkt) && (eqh->eqh_Actual < ioreq->iouh_Length))
                {
                    KPRINTF(10, ("Reloading BULK at %ld/%ld\n", eqh->eqh_Actual, ioreq->iouh_Length));

                    ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
                    phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer + ioreq->iouh_Actual);
                    predetd = etd = eqh->eqh_FirstTD;

                    CONSTWRITEMEM32_LE(&eqh->eqh_CurrTD, EHCI_TERMINATE);
                    CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
                    CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
                    do
                    {
                        len = ioreq->iouh_Length - eqh->eqh_Actual;
                        if(len > 4*EHCI_PAGE_SIZE)
                        {
                            len = 4*EHCI_PAGE_SIZE;
                        }
                        etd->etd_Length = len;
                        KPRINTF(1, ("Reload Bulk TD 0x%p len %ld (%ld/%ld) phy=0x%p\n",
                                    etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
                        WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
                        // FIXME need quark scatter gather mechanism here
                        WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
                        WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
                        WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
                        WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
                        WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));

                        // FIXME Make use of these on 64-bit-capable hardware
                        etd->etd_ExtBufferPtr[0] = 0;
                        etd->etd_ExtBufferPtr[1] = 0;
                        etd->etd_ExtBufferPtr[2] = 0;
                        etd->etd_ExtBufferPtr[3] = 0;
                        etd->etd_ExtBufferPtr[4] = 0;

                        phyaddr += len;
                        eqh->eqh_Actual += len;
                        zeroterm = (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0));
                        predetd = etd;
                        etd = etd->etd_Succ;
                        if((!etd) && zeroterm)
                        {
                            // rare case where the zero packet would be lost, allocate etd and append zero packet.
                            etd = ehciAllocTD(hc);
                            if(!etd)
                            {
                                KPRINTF(200, ("INTERNAL ERROR! This should not happen! Could not allocate zero packet TD\n"));
                                break;
                            }
                            predetd->etd_Succ = etd;
                            predetd->etd_NextTD = etd->etd_Self;
                            predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
                            etd->etd_Succ = NULL;
                            CONSTWRITEMEM32_LE(&etd->etd_NextTD, EHCI_TERMINATE);
                            CONSTWRITEMEM32_LE(&etd->etd_AltNextTD, EHCI_TERMINATE);
                        }
                    } while(etd && ((eqh->eqh_Actual < ioreq->iouh_Length) || zeroterm));
                    ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
                    WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
                    CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
                    CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);

                    etd = eqh->eqh_FirstTD;
                    eqh->eqh_NextTD = etd->etd_Self;

                    unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
                }
                else
                {
                    ehciFreeAsyncContext(hc, ioreq);
                    // use next data toggle bit based on last successful transaction
                    KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
                    unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
                    KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
                    if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
                    {
                        // check for successful clear feature and set address ctrl transfers
                        uhwCheckSpecialCtrlTransfers(hc, ioreq);
                    }
                    ReplyMsg(&ioreq->iouh_Req.io_Message);
                }
            }
        }
        else
        {
            KPRINTF(20, ("IOReq=0x%p has no UQH!\n", ioreq));
        }
        ioreq = nextioreq;
    }
    KPRINTF(1, ("Checking for Periodic work done...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
    while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
    {
        eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
        if(eqh)
        {
            KPRINTF(1, ("Examining IOReq=0x%p with EQH=0x%p\n", ioreq, eqh));
            nexttd = READMEM32_LE(&eqh->eqh_NextTD);
            etd = eqh->eqh_FirstTD;
            ctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
            devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
            halted = ((ctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
            if(halted || (!(ctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
            {
                KPRINTF(1, ("EQH not active %08lx\n", ctrlstatus));
                actual = 0;
                do
                {
                    ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
                    KPRINTF(1, ("Periodic: TD=0x%p CS=%08lx\n", etd, ctrlstatus));
                    if(ctrlstatus & ETCF_ACTIVE)
                    {
                        if(halted)
                        {
                            KPRINTF(20, ("Periodic: Halted before TD\n"));
                            //ctrlstatus = eqh->eqh_CtrlStatus;
                            if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
                            {
                                KPRINTF(20, ("NAK timeout\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
                            }
                            break;
                        }
                        else
                        {
                            KPRINTF(20, ("Periodic: Internal error! Still active?!\n"));
                            break;
                        }
                    }

                    if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR|ETSF_MISSEDCSPLIT))
                    {
                        if(ctrlstatus & ETSF_BABBLE)
                        {
                            KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
                            ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
                        }
                        else if(ctrlstatus & ETSF_MISSEDCSPLIT)
                        {
                            KPRINTF(20, ("Missed CSplit %08lx\n", ctrlstatus));
                            ioreq->iouh_Req.io_Error = UHIOERR_STALL;
                        }
                        else if(ctrlstatus & ETSF_DATABUFFERERR)
                        {
                            KPRINTF(20, ("Databuffer error\n"));
                            ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
                        }
                        else if(ctrlstatus & ETSF_TRANSERR)
                        {
                            if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
                            {
                                KPRINTF(20, ("STALLED!\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_STALL;
                            }
                            else
                            {
                                KPRINTF(20, ("TIMEOUT!\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
                            }
                        }
                        else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
                        {
                            ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
                        }
                        break;
                    }

                    len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
                    actual += len;
                    if(ctrlstatus & ETSM_TRANSLENGTH)
                    {
                        KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
                        break;
                    }
                    etd = etd->etd_Succ;
                } while(etd);

                if((actual < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
                {
                    ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
                }
                ioreq->iouh_Actual += actual;
                ehciFreePeriodicContext(hc, ioreq);
                updatetree = TRUE;
                // use next data toggle bit based on last successful transaction
                KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
                unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
                KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
                ReplyMsg(&ioreq->iouh_Req.io_Message);
            }
        }
        else
        {
            KPRINTF(20, ("IOReq=0x%p has no UQH!\n", ioreq));
        }
        ioreq = nextioreq;
    }
    if(updatetree)
    {
        ehciUpdateIntTree(hc);
    }
}
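/*
 * Added note (not in the original source): hc_FrameCounter advances in
 * microframes (8 per millisecond frame, see the EHCI_FRAMECOUNT handling
 * further below), which is why the NAK timeout deadlines here and in the
 * scheduling functions are computed as hc_FrameCounter +
 * (iouh_NakTimeout<<3): a timeout given in milliseconds is shifted by 3 to
 * convert it into microframes before being compared against the counter.
 */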
void ehciScheduleCtrlTDs(struct PCIController *hc)
{
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    UWORD devadrep;
    struct EhciQH *eqh;
    struct EhciTD *setupetd;
    struct EhciTD *dataetd;
    struct EhciTD *termetd;
    struct EhciTD *predetd;
    ULONG epcaps;
    ULONG ctrlstatus;
    ULONG len;
    IPTR phyaddr;

    /* *** CTRL Transfers *** */
    KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
    {
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
        KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
            continue;
        }

        eqh = ehciAllocQH(hc);
        if(!eqh)
        {
            break;
        }
        setupetd = ehciAllocTD(hc);
        if(!setupetd)
        {
            ehciFreeQH(hc, eqh);
            break;
        }
        termetd = ehciAllocTD(hc);
        if(!termetd)
        {
            ehciFreeTD(hc, setupetd);
            ehciFreeQH(hc, eqh);
            break;
        }
        eqh->eqh_IOReq = ioreq;
        eqh->eqh_FirstTD = setupetd;
        eqh->eqh_Actual = 0;

        epcaps = ((0<<EQES_RELOAD)|EQEF_TOGGLEFROMTD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
        if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
        {
            KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
            // full speed and low speed handling
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
            epcaps |= EQEF_SPLITCTRLEP;
            if(ioreq->iouh_Flags & UHFF_LOWSPEED)
            {
                KPRINTF(10, ("*** LOW SPEED ***\n"));
                epcaps |= EQEF_LOWSPEED;
            }
        }
        else
        {
            CONSTWRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1);
            epcaps |= EQEF_HIGHSPEED;
        }
        WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
        //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
        //eqh->eqh_AltNextTD = eqh->eqh_NextTD = setupetd->etd_Self;

        //termetd->etd_QueueHead = setupetd->etd_QueueHead = eqh;

        KPRINTF(1, ("SetupTD=0x%p, TermTD=0x%p\n", setupetd, termetd));

        setupetd->etd_Length = 8;

        CONSTWRITEMEM32_LE(&setupetd->etd_CtrlStatus, (8<<ETSS_TRANSLENGTH)|ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_SETUP);

        eqh->eqh_SetupBuf = usbGetBuffer(&ioreq->iouh_SetupData, 8, UHDIR_OUT);
        phyaddr = (IPTR) pciGetPhysical(hc, eqh->eqh_SetupBuf);

        WRITEMEM32_LE(&setupetd->etd_BufferPtr[0], phyaddr);
        WRITEMEM32_LE(&setupetd->etd_BufferPtr[1], (phyaddr + 8) & EHCI_PAGE_MASK); // theoretically, setup data may cross one page
        setupetd->etd_BufferPtr[2] = 0; // clear for overlay bits

        // FIXME Make use of these on 64-bit-capable hardware
        setupetd->etd_ExtBufferPtr[0] = 0;
        setupetd->etd_ExtBufferPtr[1] = 0;
        setupetd->etd_ExtBufferPtr[2] = 0;

        ctrlstatus = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
        predetd = setupetd;
        if(ioreq->iouh_Length)
        {
            eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT);
            phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer);
            do
            {
                dataetd = ehciAllocTD(hc);
                if(!dataetd)
                {
                    break;
                }
                ctrlstatus ^= ETCF_DATA1; // toggle bit
                predetd->etd_Succ = dataetd;
                predetd->etd_NextTD = dataetd->etd_Self;
                dataetd->etd_AltNextTD = termetd->etd_Self;

                len = ioreq->iouh_Length - eqh->eqh_Actual;
                if(len > 4*EHCI_PAGE_SIZE)
                {
                    len = 4*EHCI_PAGE_SIZE;
                }
                dataetd->etd_Length = len;
                WRITEMEM32_LE(&dataetd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
                // FIXME need quark scatter gather mechanism here
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[0], phyaddr);
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));

                // FIXME Make use of these on 64-bit-capable hardware
                dataetd->etd_ExtBufferPtr[0] = 0;
                dataetd->etd_ExtBufferPtr[1] = 0;
                dataetd->etd_ExtBufferPtr[2] = 0;
                dataetd->etd_ExtBufferPtr[3] = 0;
                dataetd->etd_ExtBufferPtr[4] = 0;

                phyaddr += len;
                eqh->eqh_Actual += len;
                predetd = dataetd;
            } while(eqh->eqh_Actual < ioreq->iouh_Length);
            if(!dataetd)
            {
                // not enough dataetds? try again later
                usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
                usbReleaseBuffer(eqh->eqh_SetupBuf, &ioreq->iouh_SetupData, 0, 0);
                ehciFreeQHandTDs(hc, eqh);
                ehciFreeTD(hc, termetd); // this one's not linked yet
                break;
            }
        }
        ctrlstatus |= ETCF_DATA1|ETCF_READYINTEN;
        ctrlstatus ^= (ETCF_PIDCODE_IN^ETCF_PIDCODE_OUT);

        predetd->etd_NextTD = termetd->etd_Self;
        predetd->etd_Succ = termetd;
        CONSTWRITEMEM32_LE(&termetd->etd_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&termetd->etd_AltNextTD, EHCI_TERMINATE);
        WRITEMEM32_LE(&termetd->etd_CtrlStatus, ctrlstatus);
        termetd->etd_Length = 0;
        termetd->etd_BufferPtr[0] = 0; // clear for overlay bits
        termetd->etd_BufferPtr[1] = 0; // clear for overlay bits
        termetd->etd_BufferPtr[2] = 0; // clear for overlay bits
        termetd->etd_ExtBufferPtr[0] = 0; // clear for overlay bits
        termetd->etd_ExtBufferPtr[1] = 0; // clear for overlay bits
        termetd->etd_ExtBufferPtr[2] = 0; // clear for overlay bits
        termetd->etd_Succ = NULL;

        // due to silicon bugs, we fill in the first overlay ourselves.
        eqh->eqh_CurrTD = setupetd->etd_Self;
        eqh->eqh_NextTD = setupetd->etd_NextTD;
        eqh->eqh_AltNextTD = setupetd->etd_AltNextTD;
        eqh->eqh_CtrlStatus = setupetd->etd_CtrlStatus;
        eqh->eqh_BufferPtr[0] = setupetd->etd_BufferPtr[0];
        eqh->eqh_BufferPtr[1] = setupetd->etd_BufferPtr[1];
        eqh->eqh_BufferPtr[2] = 0;
        eqh->eqh_ExtBufferPtr[0] = setupetd->etd_ExtBufferPtr[0];
        eqh->eqh_ExtBufferPtr[1] = setupetd->etd_ExtBufferPtr[1];
        eqh->eqh_ExtBufferPtr[2] = 0;

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = eqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;

        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry (just behind the asyncQH)
        eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
        eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;

        eqh->eqh_Pred = hc->hc_EhciAsyncQH;
        eqh->eqh_Succ->eqh_Pred = eqh;
        hc->hc_EhciAsyncQH->eqh_Succ = eqh;
        hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;

        ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
    }
}
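/*
 * Added note (not in the original source): a control transfer is built as a
 * SETUP qTD (8 bytes, DATA0), an optional chain of data-stage qTDs that
 * alternate the data toggle starting with DATA1, and a zero-length STATUS
 * qTD that always uses DATA1 and the opposite PID of the setup-declared
 * direction (the PIDCODE_IN^PIDCODE_OUT XOR above). The first queue head
 * overlay is filled in by hand because of the silicon bugs mentioned in the
 * comment.
 */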
void ehciScheduleIntTDs(struct PCIController *hc)
{
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    UWORD devadrep;
    UWORD cnt;
    struct EhciQH *eqh;
    struct EhciQH *inteqh;
    struct EhciTD *etd;
    struct EhciTD *predetd;
    ULONG epcaps;
    ULONG ctrlstatus;
    ULONG splitctrl;
    ULONG len;
    IPTR phyaddr;

    /* *** INT Transfers *** */
    KPRINTF(1, ("Scheduling new INT transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
    {
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
            continue;
        }

        eqh = ehciAllocQH(hc);
        if(!eqh)
        {
            break;
        }
        eqh->eqh_IOReq = ioreq;
        eqh->eqh_Actual = 0;

        epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
        if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
        {
            KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
            // full speed and low speed handling
            if(ioreq->iouh_Flags & UHFF_LOWSPEED)
            {
                KPRINTF(10, ("*** LOW SPEED ***\n"));
                epcaps |= EQEF_LOWSPEED;
            }
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, (EQSF_MULTI_1|(0x01<<EQSS_MUSOFACTIVE)|(0x1c<<EQSS_MUSOFCSPLIT))|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
            if(ioreq->iouh_Interval >= 255)
            {
                inteqh = hc->hc_EhciIntQH[8]; // 256ms interval
            }
            else
            {
                cnt = 0;
                do
                {
                    inteqh = hc->hc_EhciIntQH[cnt++];
                } while(ioreq->iouh_Interval >= (1<<cnt));
            }
        }
        else
        {
            epcaps |= EQEF_HIGHSPEED;
            if(ioreq->iouh_Flags & UHFF_MULTI_3)
            {
                splitctrl = EQSF_MULTI_3;
            }
            else if(ioreq->iouh_Flags & UHFF_MULTI_2)
            {
                splitctrl = EQSF_MULTI_2;
            }
            else
            {
                splitctrl = EQSF_MULTI_1;
            }
            if(ioreq->iouh_Interval < 2) // 0-1 µFrames
            {
                splitctrl |= (0xff<<EQSS_MUSOFACTIVE);
            }
            else if(ioreq->iouh_Interval < 4) // 2-3 µFrames
            {
                splitctrl |= (0x55<<EQSS_MUSOFACTIVE);
            }
            else if(ioreq->iouh_Interval < 8) // 4-7 µFrames
            {
                splitctrl |= (0x22<<EQSS_MUSOFACTIVE);
            }
            else if(ioreq->iouh_Interval > 511) // 64ms and higher
            {
                splitctrl |= (0x10<<EQSS_MUSOFACTIVE);
            }
            else //if(ioreq->iouh_Interval >= 8) // 1-64ms
            {
                splitctrl |= (0x01<<EQSS_MUSOFACTIVE);
            }
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
            if(ioreq->iouh_Interval >= 1024)
            {
                inteqh = hc->hc_EhciIntQH[10]; // 1024 µFrames interval
            }
            else
            {
                cnt = 0;
                do
                {
                    inteqh = hc->hc_EhciIntQH[cnt++];
                } while(ioreq->iouh_Interval >= (1<<cnt));
            }
        }
        WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
        //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
        eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()

        ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
        if(unit->hu_DevDataToggle[devadrep])
        {
            // continue with data toggle 0
            ctrlstatus |= ETCF_DATA1;
        }

        eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, ioreq->iouh_Dir);
        phyaddr = (IPTR) pciGetPhysical(hc, eqh->eqh_Buffer);
        predetd = NULL;
        do
        {
            etd = ehciAllocTD(hc);
            if(!etd)
            {
                break;
            }
            if(predetd)
            {
                predetd->etd_Succ = etd;
                predetd->etd_NextTD = etd->etd_Self;
                predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
            }
            else
            {
                eqh->eqh_FirstTD = etd;
                //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
            }

            len = ioreq->iouh_Length - eqh->eqh_Actual;
            if(len > 4*EHCI_PAGE_SIZE)
            {
                len = 4*EHCI_PAGE_SIZE;
            }
            etd->etd_Length = len;
            WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
            // FIXME need quark scatter gather mechanism here
            WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
            WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));

            // FIXME Use these on 64-bit-capable hardware
            etd->etd_ExtBufferPtr[0] = 0;
            etd->etd_ExtBufferPtr[1] = 0;
            etd->etd_ExtBufferPtr[2] = 0;
            etd->etd_ExtBufferPtr[3] = 0;
            etd->etd_ExtBufferPtr[4] = 0;

            phyaddr += len;
            predetd = etd;
            eqh->eqh_Actual += len;

        } while(eqh->eqh_Actual < ioreq->iouh_Length);

        if(!etd)
        {
            // not enough etds? try again later
            usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
            ehciFreeQHandTDs(hc, eqh);
            break;
        }
        ctrlstatus |= ETCF_READYINTEN|(etd->etd_Length<<ETSS_TRANSLENGTH);
        WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);

        CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
        predetd->etd_Succ = NULL;

        // due to silicon bugs, we fill in the first overlay ourselves.
        etd = eqh->eqh_FirstTD;
        eqh->eqh_CurrTD = etd->etd_Self;
        eqh->eqh_NextTD = etd->etd_NextTD;
        eqh->eqh_AltNextTD = etd->etd_AltNextTD;
        eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
        eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
        eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
        eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
        eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
        eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
        eqh->eqh_ExtBufferPtr[0] = etd->etd_ExtBufferPtr[0];
        eqh->eqh_ExtBufferPtr[1] = etd->etd_ExtBufferPtr[1];
        eqh->eqh_ExtBufferPtr[2] = etd->etd_ExtBufferPtr[2];
        eqh->eqh_ExtBufferPtr[3] = etd->etd_ExtBufferPtr[3];
        eqh->eqh_ExtBufferPtr[4] = etd->etd_ExtBufferPtr[4];

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = eqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;

        AddTail(&hc->hc_PeriodicTDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry in the right IntQH
        eqh->eqh_Succ = inteqh->eqh_Succ;
        eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;

        eqh->eqh_Pred = inteqh;
        eqh->eqh_Succ->eqh_Pred = eqh;
        inteqh->eqh_Succ = eqh;
        inteqh->eqh_NextQH = eqh->eqh_Self;

        ehciUpdateIntTree(hc);

        ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
    }
}
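/*
 * Added note (not in the original source): for high-speed interrupt
 * endpoints the µframe S-mask (EQSS_MUSOFACTIVE) selected above encodes the
 * polling rate within each 1ms frame: 0xff polls in every µframe, 0x55 in
 * every second, 0x22 in every fourth, and 0x01/0x10 once per frame. The
 * frame-level spacing is then handled by hooking the QH into the matching
 * hc_EhciIntQH[] level. Split transactions instead use a fixed start mask
 * of 0x01 with complete-splits in µframes 2-4 (0x1c).
 */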
void ehciScheduleBulkTDs(struct PCIController *hc)
{
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    UWORD devadrep;
    struct EhciQH *eqh;
    struct EhciTD *etd = NULL;
    struct EhciTD *predetd;
    ULONG epcaps;
    ULONG ctrlstatus;
    ULONG splitctrl;
    ULONG len;
    IPTR phyaddr;

    /* *** BULK Transfers *** */
    KPRINTF(1, ("Scheduling new BULK transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
    {
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
            continue;
        }

        eqh = ehciAllocQH(hc);
        if(!eqh)
        {
            break;
        }
        eqh->eqh_IOReq = ioreq;
        eqh->eqh_Actual = 0;

        epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
        if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
        {
            KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
            // full speed and low speed handling
            if(ioreq->iouh_Flags & UHFF_LOWSPEED)
            {
                KPRINTF(10, ("*** LOW SPEED ***\n"));
                epcaps |= EQEF_LOWSPEED;
            }
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
        }
        else
        {
            epcaps |= EQEF_HIGHSPEED;
            if(ioreq->iouh_Flags & UHFF_MULTI_3)
            {
                splitctrl = EQSF_MULTI_3;
            }
            else if(ioreq->iouh_Flags & UHFF_MULTI_2)
            {
                splitctrl = EQSF_MULTI_2;
            }
            else
            {
                splitctrl = EQSF_MULTI_1;
            }
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
        }
        WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
        //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
        eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()

        ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
        if(unit->hu_DevDataToggle[devadrep])
        {
            // continue with data toggle 0
            ctrlstatus |= ETCF_DATA1;
        }

        eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, ioreq->iouh_Dir);
        phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer);
        predetd = NULL;
        do
        {
            if((eqh->eqh_Actual >= EHCI_TD_BULK_LIMIT) && (eqh->eqh_Actual < ioreq->iouh_Length))
            {
                KPRINTF(10, ("Bulk too large, splitting...\n"));
                break;
            }
            etd = ehciAllocTD(hc);
            if(!etd)
            {
                break;
            }
            if(predetd)
            {
                predetd->etd_Succ = etd;
                predetd->etd_NextTD = etd->etd_Self;
                predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
            }
            else
            {
                eqh->eqh_FirstTD = etd;
                //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
            }

            len = ioreq->iouh_Length - eqh->eqh_Actual;
            if(len > 4*EHCI_PAGE_SIZE)
            {
                len = 4*EHCI_PAGE_SIZE;
            }
            etd->etd_Length = len;
            KPRINTF(1, ("Bulk TD 0x%p len %ld (%ld/%ld) phy=0x%p\n",
                        etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
            WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
            // FIXME need quark scatter gather mechanism here
            WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
            WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));

            // FIXME Use these on 64-bit-capable hardware
            etd->etd_ExtBufferPtr[0] = 0;
            etd->etd_ExtBufferPtr[1] = 0;
            etd->etd_ExtBufferPtr[2] = 0;
            etd->etd_ExtBufferPtr[3] = 0;
            etd->etd_ExtBufferPtr[4] = 0;

            phyaddr += len;
            predetd = etd;
            eqh->eqh_Actual += len;

        } while((eqh->eqh_Actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0)));

        if(!etd)
        {
            // not enough etds? try again later
            usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
            ehciFreeQHandTDs(hc, eqh);
            break;
        }
        ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
        WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);

        predetd->etd_Succ = NULL;
        CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);

        // due to silicon bugs, we fill in the first overlay ourselves.
        etd = eqh->eqh_FirstTD;
        eqh->eqh_CurrTD = etd->etd_Self;
        eqh->eqh_NextTD = etd->etd_NextTD;
        eqh->eqh_AltNextTD = etd->etd_AltNextTD;
        eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
        eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
        eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
        eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
        eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
        eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
        eqh->eqh_ExtBufferPtr[0] = etd->etd_ExtBufferPtr[0];
        eqh->eqh_ExtBufferPtr[1] = etd->etd_ExtBufferPtr[1];
        eqh->eqh_ExtBufferPtr[2] = etd->etd_ExtBufferPtr[2];
        eqh->eqh_ExtBufferPtr[3] = etd->etd_ExtBufferPtr[3];
        eqh->eqh_ExtBufferPtr[4] = etd->etd_ExtBufferPtr[4];

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = eqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;

        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry (just behind the asyncQH)
        eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
        eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;

        eqh->eqh_Pred = hc->hc_EhciAsyncQH;
        eqh->eqh_Succ->eqh_Pred = eqh;
        hc->hc_EhciAsyncQH->eqh_Succ = eqh;
        hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;

        ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
    }
}
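/*
 * Added note (not in the original source): only up to EHCI_TD_BULK_LIMIT
 * bytes of a bulk request are mapped onto qTDs at once ("Bulk too large,
 * splitting"). When that partial chain completes, ehciHandleFinishedTDs()
 * reuses the same qTDs for the next chunk ("Reloading BULK"), so large
 * transfers proceed without ever needing more descriptors than the initial
 * chain.
 */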
void ehciUpdateFrameCounter(struct PCIController *hc)
{
    hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffffc000)|(READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT) & 0x3fff);
}
static AROS_INTH1(ehciCompleteInt, struct PCIController *, hc)
{
    AROS_INTFUNC_INIT

    KPRINTF(1, ("CompleteInt!\n"));
    ehciUpdateFrameCounter(hc);

    /* **************** PROCESS DONE TRANSFERS **************** */
    if(hc->hc_AsyncAdvanced)
    {
        struct EhciQH *eqh;
        struct EhciTD *etd;
        struct EhciTD *nextetd;

        hc->hc_AsyncAdvanced = FALSE;

        KPRINTF(1, ("AsyncAdvance 0x%p\n", hc->hc_EhciAsyncFreeQH));

        while((eqh = hc->hc_EhciAsyncFreeQH))
        {
            KPRINTF(1, ("FreeQH 0x%p\n", eqh));
            nextetd = eqh->eqh_FirstTD;
            while((etd = nextetd))
            {
                KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
                nextetd = etd->etd_Succ;
                ehciFreeTD(hc, etd);
            }
            hc->hc_EhciAsyncFreeQH = eqh->eqh_Succ;
            ehciFreeQH(hc, eqh);
        }
    }

    ehciHandleFinishedTDs(hc);

    if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
    {
        ehciScheduleCtrlTDs(hc);
    }

    if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
    {
        ehciScheduleIntTDs(hc);
    }

    if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
    {
        ehciScheduleBulkTDs(hc);
    }

    KPRINTF(1, ("CompleteDone\n"));

    return FALSE;

    AROS_INTFUNC_EXIT
}
static AROS_INTH1(ehciIntCode, struct PCIController *, hc)
{
    AROS_INTFUNC_INIT

    struct PCIDevice *base = hc->hc_Device;
    struct PCIUnit *unit = hc->hc_Unit;
    ULONG intr;

    //KPRINTF(1, ("pciEhciInt()\n"));
    intr = READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS);
    if(intr & hc->hc_PCIIntEnMask)
    {
        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS, intr);
        //KPRINTF(1, ("INT=%04lx\n", intr));
        if (!(hc->hc_Flags & HCF_ONLINE))
        {
            return FALSE;
        }
        if(intr & EHSF_FRAMECOUNTOVER)
        {
            hc->hc_FrameCounter |= 0x3fff;
            hc->hc_FrameCounter++;
            hc->hc_FrameCounter |= READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT) & 0x3fff;
            KPRINTF(5, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
        }
        if(intr & EHSF_ASYNCADVANCE)
        {
            KPRINTF(1, ("AsyncAdvance\n"));
            hc->hc_AsyncAdvanced = TRUE;
        }
        if(intr & EHSF_HOSTERROR)
        {
            KPRINTF(200, ("Host ERROR!\n"));
        }
        if(intr & EHSF_PORTCHANGED)
        {
            UWORD hciport;
            ULONG oldval;
            UWORD portreg = EHCI_PORTSC1;

            for(hciport = 0; hciport < hc->hc_NumPorts; hciport++, portreg += 4)
            {
                oldval = READREG32_LE(hc->hc_RegBase, portreg);
                // reflect port ownership (shortcut without hc->hc_PortNum20[hciport], as usb 2.0 maps 1:1)
                unit->hu_EhciOwned[hciport] = (oldval & EHPF_NOTPORTOWNER) ? FALSE : TRUE;
                if(oldval & EHPF_ENABLECHANGE)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
                }
                if(oldval & EHPF_CONNECTCHANGE)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
                }
                if(oldval & EHPF_RESUMEDTX)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
                }
                if(oldval & EHPF_OVERCURRENTCHG)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
                }
                WRITEREG32_LE(hc->hc_RegBase, portreg, oldval);
                KPRINTF(20, ("PCI Int Port %ld Change %08lx\n", hciport + 1, oldval));
                if(hc->hc_PortChangeMap[hciport])
                {
                    unit->hu_RootPortChanges |= 1UL<<(hciport + 1);
                }
            }
            uhwCheckRootHubChanges(unit);
        }
        if(intr & (EHSF_TDDONE|EHSF_TDERROR|EHSF_ASYNCADVANCE))
        {
            SureCause(base, &hc->hc_CompleteInt);
        }
    }

    return FALSE;

    AROS_INTFUNC_EXIT
}
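/*
 * Added note (not in the original source): the port loop above writes the
 * PORTSC value straight back, which clears the write-one-to-clear change
 * bits just latched into hc_PortChangeMap. hu_EhciOwned[] mirrors port
 * ownership so the companion controllers can service full/low-speed
 * devices, and uhwCheckRootHubChanges() turns the accumulated map into
 * root-hub status change reports.
 */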
BOOL ehciInit(struct PCIController *hc, struct PCIUnit *hu)
{
    struct PCIDevice *hd = hu->hu_Device;
    struct EhciQH *eqh;
    struct EhciQH *predeqh;
    struct EhciTD *etd;
    UBYTE *memptr;
    ULONG *tabptr;
    ULONG bitcnt;
    ULONG cnt;
    ULONG hcsparams;
    ULONG hccparams;
    ULONG extcapoffset;
    ULONG legsup;
    ULONG tmp;
    volatile APTR pciregbase;

    struct TagItem pciActivateMem[] =
    {
        { aHidd_PCIDevice_isMEM, TRUE },
        { TAG_DONE, 0UL },
    };

    struct TagItem pciActivateBusmaster[] =
    {
        { aHidd_PCIDevice_isMaster, TRUE },
        { TAG_DONE, 0UL },
    };

    struct TagItem pciDeactivateBusmaster[] =
    {
        { aHidd_PCIDevice_isMaster, FALSE },
        { TAG_DONE, 0UL },
    };

    hc->hc_portroute = 0;

    hc->hc_CompleteInt.is_Node.ln_Type = NT_INTERRUPT;
    hc->hc_CompleteInt.is_Node.ln_Name = "EHCI CompleteInt";
    hc->hc_CompleteInt.is_Node.ln_Pri  = 0;
    hc->hc_CompleteInt.is_Data = hc;
    hc->hc_CompleteInt.is_Code = (VOID_FUNC)ehciCompleteInt;

    /*
        FIXME: Check the real size from the USBCMD Frame List Size field (bits 3:2)
        and set the value accordingly if the Frame List Flag in HCCPARAMS indicates RW
        for the field, else use the default value of EHCI_FRAMELIST_SIZE (1024)
    */
    hc->hc_PCIMemSize = sizeof(ULONG) * EHCI_FRAMELIST_SIZE + EHCI_FRAMELIST_ALIGNMENT + 1;
    hc->hc_PCIMemSize += sizeof(struct EhciQH) * EHCI_QH_POOLSIZE;
    hc->hc_PCIMemSize += sizeof(struct EhciTD) * EHCI_TD_POOLSIZE;

    /* FIXME: We should be able to read some EHCI registers before allocating memory */
    memptr = HIDD_PCIDriver_AllocPCIMem(hc->hc_PCIDriverObject, hc->hc_PCIMemSize);
    hc->hc_PCIMem = (APTR) memptr;
    if(memptr)
    {
        // PhysicalAddress - VirtualAdjust = VirtualAddress
        // VirtualAddress + VirtualAdjust = PhysicalAddress
        hc->hc_PCIVirtualAdjust = pciGetPhysical(hc, memptr) - (APTR)memptr;
        KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc->hc_PCIVirtualAdjust));

        memptr = (UBYTE *) ((((IPTR) hc->hc_PCIMem) + EHCI_FRAMELIST_ALIGNMENT) & (~EHCI_FRAMELIST_ALIGNMENT));
        hc->hc_EhciFrameList = (ULONG *) memptr;
        KPRINTF(10, ("FrameListBase 0x%p\n", hc->hc_EhciFrameList));
        memptr += sizeof(APTR) * EHCI_FRAMELIST_SIZE;

        eqh = (struct EhciQH *) memptr;
        hc->hc_EhciQHPool = eqh;
        cnt = EHCI_QH_POOLSIZE - 1;
        do
        {
            // minimal initialization
            eqh->eqh_Succ = (eqh + 1);
            WRITEMEM32_LE(&eqh->eqh_Self, (IPTR) (&eqh->eqh_NextQH) + hc->hc_PCIVirtualAdjust + EHCI_QUEUEHEAD);
            CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
            CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
            eqh++;
        } while(--cnt);
        eqh->eqh_Succ = NULL;
        WRITEMEM32_LE(&eqh->eqh_Self, (IPTR) (&eqh->eqh_NextQH) + hc->hc_PCIVirtualAdjust + EHCI_QUEUEHEAD);
        CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
        memptr += sizeof(struct EhciQH) * EHCI_QH_POOLSIZE;

        etd = (struct EhciTD *) memptr;
        hc->hc_EhciTDPool = etd;
        cnt = EHCI_TD_POOLSIZE - 1;
        do
        {
            etd->etd_Succ = (etd + 1);
            WRITEMEM32_LE(&etd->etd_Self, (IPTR) (&etd->etd_NextTD) + hc->hc_PCIVirtualAdjust);
            etd++;
        } while(--cnt);
        etd->etd_Succ = NULL;
        WRITEMEM32_LE(&etd->etd_Self, (IPTR) (&etd->etd_NextTD) + hc->hc_PCIVirtualAdjust);
        memptr += sizeof(struct EhciTD) * EHCI_TD_POOLSIZE;

        // empty async queue head
        hc->hc_EhciAsyncFreeQH = NULL;
        hc->hc_EhciAsyncQH = eqh = ehciAllocQH(hc);
        eqh->eqh_Succ = eqh;
        eqh->eqh_Pred = eqh;
        CONSTWRITEMEM32_LE(&eqh->eqh_EPCaps, EQEF_RECLAMHEAD);
        eqh->eqh_NextQH = eqh->eqh_Self;

        // empty terminating queue head
        hc->hc_EhciTermQH = eqh = ehciAllocQH(hc);
        eqh->eqh_Succ = NULL;
        CONSTWRITEMEM32_LE(&eqh->eqh_NextQH, EHCI_TERMINATE);
        predeqh = eqh;

        hc->hc_EhciIntQH[0] = eqh = ehciAllocQH(hc);
        eqh->eqh_Succ = predeqh;
        predeqh->eqh_Pred = eqh;
        eqh->eqh_Pred = NULL; // who knows...
        //eqh->eqh_NextQH = predeqh->eqh_Self;
        predeqh = eqh;

        // make 11 levels of QH interrupts
        for(cnt = 1; cnt < 11; cnt++)
        {
            hc->hc_EhciIntQH[cnt] = eqh = ehciAllocQH(hc);
            eqh->eqh_Succ = predeqh;
            eqh->eqh_Pred = NULL; // who knows...
            //eqh->eqh_NextQH = predeqh->eqh_Self; // link to previous int level
            predeqh = eqh;
        }

        ehciUpdateIntTree(hc);

        // fill in framelist with IntQH entry points based on interval
        tabptr = hc->hc_EhciFrameList;
        for(cnt = 0; cnt < EHCI_FRAMELIST_SIZE; cnt++)
        {
            eqh = hc->hc_EhciIntQH[10];
            bitcnt = 0;
            do
            {
                if(cnt & (1UL<<bitcnt))
                {
                    eqh = hc->hc_EhciIntQH[bitcnt];
                    break;
                }
            } while(++bitcnt < 11);
            *tabptr++ = eqh->eqh_Self;
        }
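        /*
         * Added note (not in the original source): each of the
         * EHCI_FRAMELIST_SIZE frame list entries points at the interrupt
         * anchor QH selected by the lowest set bit of the frame number
         * (frame 0 and frames with no bit below 11 fall through to
         * hc_EhciIntQH[10]). Combined with the chaining done in
         * ehciUpdateIntTree(), every frame's traversal ends at
         * hc_EhciTermQH, giving the binary tree of polling intervals used
         * by ehciScheduleIntTDs().
         */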
        etd = hc->hc_ShortPktEndTD = ehciAllocTD(hc);
        etd->etd_Succ = NULL;
        CONSTWRITEMEM32_LE(&etd->etd_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&etd->etd_AltNextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&etd->etd_CtrlStatus, 0);

        // time to initialize hardware...
        OOP_GetAttr(hc->hc_PCIDeviceObject, aHidd_PCIDevice_Base0, (IPTR *) &pciregbase);
        pciregbase = (APTR) (((IPTR) pciregbase) & (~0xf));
        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateMem); // activate memory

        extcapoffset = (READREG32_LE(pciregbase, EHCI_HCCPARAMS) & EHCM_EXTCAPOFFSET)>>EHCS_EXTCAPOFFSET;

        while(extcapoffset >= 0x40)
        {
            KPRINTF(10, ("EHCI has extended caps at 0x%08lx\n", extcapoffset));
            legsup = PCIXReadConfigLong(hc, extcapoffset);
            if(((legsup & EHLM_CAP_ID) >> EHLS_CAP_ID) == 0x01)
            {
                if(legsup & EHLF_BIOS_OWNER)
                {
                    KPRINTF(10, ("BIOS still has hands on EHCI, trying to get rid of it\n"));
                    legsup |= EHLF_OS_OWNER;
                    PCIXWriteConfigLong(hc, extcapoffset, legsup);

                    cnt = 100;
                    do
                    {
                        uhwDelayMS(10, hu);
                        legsup = PCIXReadConfigLong(hc, extcapoffset);
                        if(!(legsup & EHLF_BIOS_OWNER))
                        {
                            KPRINTF(10, ("BIOS gave up on EHCI. Pwned!\n"));
                            break;
                        }
                    } while(--cnt);
                    if(!cnt)
                    {
                        KPRINTF(10, ("BIOS didn't release EHCI. Forcing and praying...\n"));
                        legsup |= EHLF_OS_OWNER;
                        legsup &= ~EHLF_BIOS_OWNER;
                        PCIXWriteConfigLong(hc, extcapoffset, legsup);
                    }
                }
                /* disable all SMIs */
                PCIXWriteConfigLong(hc, extcapoffset + 4, 0);
                break;
            }
            extcapoffset = (legsup & EHCM_EXTCAPOFFSET)>>EHCS_EXTCAPOFFSET;
        }

        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciDeactivateBusmaster); // no busmaster yet

        // we use the operational registers as RegBase.
        hc->hc_RegBase = (APTR) ((IPTR) pciregbase + READREG16_LE(pciregbase, EHCI_CAPLENGTH));
        KPRINTF(10, ("RegBase = 0x%p\n", hc->hc_RegBase));

        KPRINTF(10, ("Resetting EHCI HC\n"));
        KPRINTF(10, ("EHCI CMD: 0x%08x STS: 0x%08x\n", READREG32_LE(hc->hc_RegBase, EHCI_USBCMD), READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS)));
        /* Step 1: Stop the HC */
        tmp = READREG32_LE(hc->hc_RegBase, EHCI_USBCMD);
        tmp &= ~EHUF_RUNSTOP;
        CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, tmp);

        /* Step 2. Wait for the controller to halt */
        cnt = 100;
        do
        {
            uhwDelayMS(10, hu);
            if(READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS) & EHSF_HCHALTED)
            {
                break;
            }
        } while(--cnt);
        if(!cnt)
        {
            KPRINTF(200, ("EHCI: Timeout waiting for controller to halt\n"));
        }

        /* Step 3. Reset the controller */
        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, tmp | EHUF_HCRESET);

        /* Step 4. Wait for the reset bit to clear */
        cnt = 100;
        do
        {
            uhwDelayMS(10, hu);
            if(!(READREG32_LE(hc->hc_RegBase, EHCI_USBCMD) & EHUF_HCRESET))
            {
                break;
            }
        } while(--cnt);
        if(!cnt)
        {
            KPRINTF(20, ("Reset Timeout!\n"));
        }
        else
        {
            KPRINTF(20, ("Reset finished after %ld ticks\n", 100-cnt));
        }

        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateBusmaster); // enable busmaster

        // Read HCSPARAMS register to obtain number of downstream ports
        hcsparams = READREG32_LE(pciregbase, EHCI_HCSPARAMS);
        hccparams = READREG32_LE(pciregbase, EHCI_HCCPARAMS);

        hc->hc_NumPorts = (hcsparams & EHSM_NUM_PORTS)>>EHSS_NUM_PORTS;

        KPRINTF(20, ("Found EHCI Controller 0x%p with %ld ports (%ld companions with %ld ports each)\n",
                     hc->hc_PCIDeviceObject, hc->hc_NumPorts,
                     (hcsparams & EHSM_NUM_COMPANIONS)>>EHSS_NUM_COMPANIONS,
                     (hcsparams & EHSM_PORTS_PER_COMP)>>EHSS_PORTS_PER_COMP));

        if(hcsparams & EHSF_EXTPORTROUTING)
        {
            hc->hc_complexrouting = TRUE;
            hc->hc_portroute = READREG32_LE(pciregbase, EHCI_HCSPPORTROUTE);
            for(cnt = 0; cnt < hc->hc_NumPorts; cnt++)
            {
                KPRINTF(100, ("Port %ld maps to controller %ld\n", cnt, ((hc->hc_portroute >> (cnt<<2)) & 0xf)));
            }
        }
        else
        {
            hc->hc_complexrouting = FALSE;
        }

        KPRINTF(20, ("HCCParams: 64 Bit=%s, ProgFrameList=%s, AsyncSchedPark=%s\n",
                     (hccparams & EHCF_64BITS) ? "Yes" : "No",
                     (hccparams & EHCF_PROGFRAMELIST) ? "Yes" : "No",
                     (hccparams & EHCF_ASYNCSCHEDPARK) ? "Yes" : "No"));
        hc->hc_EhciUsbCmd = (1UL<<EHUS_INTTHRESHOLD);

        /* FIXME HERE: Process EHCF_64BITS flag and implement 64-bit addressing */

        if(hccparams & EHCF_ASYNCSCHEDPARK)
        {
            KPRINTF(20, ("Enabling AsyncSchedParkMode with MULTI_3\n"));
            hc->hc_EhciUsbCmd |= EHUF_ASYNCSCHEDPARK|(3<<EHUS_ASYNCPARKCOUNT);
        }

        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd);

        CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT, 0);

        WRITEREG32_LE(hc->hc_RegBase, EHCI_PERIODICLIST, (IPTR)pciGetPhysical(hc, hc->hc_EhciFrameList));
        WRITEREG32_LE(hc->hc_RegBase, EHCI_ASYNCADDR, AROS_LONG2LE(hc->hc_EhciAsyncQH->eqh_Self));
        CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS, EHSF_ALL_INTS);

        // install reset handler
        hc->hc_ResetInt.is_Code = (VOID_FUNC)EhciResetHandler;
        hc->hc_ResetInt.is_Data = hc;
        AddResetCallback(&hc->hc_ResetInt);

        hc->hc_PCIIntHandler.is_Node.ln_Name = "EHCI PCI (pciusb.device)";
        hc->hc_PCIIntHandler.is_Node.ln_Pri = 5;
        hc->hc_PCIIntHandler.is_Node.ln_Type = NT_INTERRUPT;
        hc->hc_PCIIntHandler.is_Code = (VOID_FUNC)ehciIntCode;
        hc->hc_PCIIntHandler.is_Data = hc;
        PCIXAddInterrupt(hc, &hc->hc_PCIIntHandler);

        hc->hc_PCIIntEnMask = EHSF_ALL_INTS;
        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBINTEN, hc->hc_PCIIntEnMask);

        CacheClearE(hc->hc_EhciFrameList, sizeof(ULONG) * EHCI_FRAMELIST_SIZE, CACRF_ClearD);
        CacheClearE(hc->hc_EhciQHPool, sizeof(struct EhciQH) * EHCI_QH_POOLSIZE, CACRF_ClearD);
        CacheClearE(hc->hc_EhciTDPool, sizeof(struct EhciTD) * EHCI_TD_POOLSIZE, CACRF_ClearD);

        CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_CONFIGFLAG, EHCF_CONFIGURED);
        hc->hc_EhciUsbCmd |= EHUF_RUNSTOP|EHUF_PERIODICENABLE|EHUF_ASYNCENABLE;
        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd);

        KPRINTF(20, ("HW Init done\n"));

        KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_USBCMD)));
        KPRINTF(10, ("HW Regs USBSTS=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS)));
        KPRINTF(10, ("HW Regs FRAMECOUNT=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT)));

        KPRINTF(1000, ("ehciInit returns TRUE...\n"));
        return TRUE;
    }

    /*
        FIXME: What would the appropriate debug level be?
    */
    KPRINTF(1000, ("ehciInit returns FALSE...\n"));
    return FALSE;
}
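/*
 * Added note (not in the original source): the order used above follows the
 * usual EHCI bring-up sequence: claim ownership from the BIOS via the
 * legacy-support extended capability and disable its SMIs, halt and reset
 * the controller, program the periodic and async list base registers with
 * physical addresses, install the interrupt handlers, and only then set
 * CONFIGFLAG and the run bit so the ports are routed to the EHCI controller.
 */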
void ehciFree(struct PCIController *hc, struct PCIUnit *hu)
{
    hc = (struct PCIController *) hu->hu_Controllers.lh_Head;
    while(hc->hc_Node.ln_Succ)
    {
        switch(hc->hc_HCIType)
        {
            case HCITYPE_EHCI:
            {
                UWORD hciport;
                UWORD portreg;

                KPRINTF(20, ("Shutting down EHCI 0x%p\n", hc));
                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBINTEN, 0);
                // disable all ports
                for(hciport = 0; hciport < hc->hc_NumPorts; hciport++)
                {
                    portreg = EHCI_PORTSC1 + (hciport<<2);
                    WRITEREG32_LE(hc->hc_RegBase, portreg, 0);
                }
                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, 1UL<<EHUS_INTTHRESHOLD);

                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_CONFIGFLAG, 0);
                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, EHUF_HCRESET|(1UL<<EHUS_INTTHRESHOLD));

                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, 1UL<<EHUS_INTTHRESHOLD);

                KPRINTF(20, ("Shutting down EHCI done.\n"));
                break;
            }
        }
        hc = (struct PCIController *) hc->hc_Node.ln_Succ;
    }
}