/*
    Copyright © 2010-2013, The AROS Development Team. All rights reserved
*/

#include <proto/exec.h>

#include <devices/usb_hub.h>

#undef HiddPCIDeviceAttrBase
#define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
#define HiddAttrBase (hd->hd_HiddAB)
static AROS_INTH1(EhciResetHandler, struct PCIController *, hc)
{
    // stop and reset the host controller on system reset
    CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, EHUF_HCRESET|(1UL<<EHUS_INTTHRESHOLD));
}
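
/*
 * ehciFinishRequest() tears down a completed (or aborted) transfer: the queue
 * head is unlinked from the software ring and from the hardware horizontal
 * link of its predecessor, the endpoint is marked free again, and any bounce
 * buffers are released. The busy/toggle arrays are indexed by a packed
 * device/endpoint key:
 *
 *   devadrep = (DevAddr << 5) + Endpoint + (Dir == IN ? 0x10 : 0)
 *
 * i.e. bits 0-3 hold the endpoint number, bit 4 the direction and the upper
 * bits the device address.
 */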
static void ehciFinishRequest(struct PCIUnit *unit, struct IOUsbHWReq *ioreq)
{
    struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;
    UWORD devadrep;
    UWORD dir;

    // unlink from schedule
    eqh->eqh_Pred->eqh_NextQH = eqh->eqh_Succ->eqh_Self;
    CacheClearE(&eqh->eqh_Pred->eqh_NextQH, 32, CACRF_ClearD);

    eqh->eqh_Succ->eqh_Pred = eqh->eqh_Pred;
    eqh->eqh_Pred->eqh_Succ = eqh->eqh_Succ;

    /* Deactivate the endpoint */
    Remove(&ioreq->iouh_Req.io_Message.mn_Node);
    devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
    unit->hu_DevBusyReq[devadrep] = NULL;

    /* Release bounce buffers */
    if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
    {
        dir = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT;
    } else {
        dir = ioreq->iouh_Dir;
    }
    usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, ioreq->iouh_Actual, dir);
    usbReleaseBuffer(eqh->eqh_SetupBuf, &ioreq->iouh_SetupData, 8, UHDIR_OUT);
    eqh->eqh_Buffer = NULL;
    eqh->eqh_SetupBuf = NULL;
}
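
/*
 * Freeing queue heads from the asynchronous schedule is deferred: the QH is
 * only queued on hc_EhciAsyncFreeQH here, and EHUF_ASYNCDOORBELL is set in
 * USBCMD so the controller raises an "async advance" interrupt once it has
 * released all references to the unlinked QH. The actual ehciFreeQH()/
 * ehciFreeTD() calls happen later in ehciCompleteInt().
 */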
void ehciFreeAsyncContext(struct PCIController *hc, struct IOUsbHWReq *ioreq)
{
    struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;

    KPRINTF(5, ("Freeing AsyncContext 0x%p\n", eqh));
    ehciFinishRequest(hc->hc_Unit, ioreq);

    // need to wait until an async schedule rollover before freeing these
    eqh->eqh_Succ = hc->hc_EhciAsyncFreeQH;
    hc->hc_EhciAsyncFreeQH = eqh;
    WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd|EHUF_ASYNCDOORBELL);
}
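
/*
 * Periodic (interrupt transfer) contexts are not subject to the async advance
 * doorbell, so their TDs and the QH can be reclaimed right away; interrupts
 * are disabled around the walk to avoid racing with the completion handler.
 */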
void ehciFreePeriodicContext(struct PCIController *hc, struct IOUsbHWReq *ioreq)
{
    struct EhciQH *eqh = ioreq->iouh_DriverPrivate1;
    struct EhciTD *etd;
    struct EhciTD *nextetd;

    KPRINTF(5, ("Freeing PeriodicContext 0x%p\n", eqh));
    ehciFinishRequest(hc->hc_Unit, ioreq);

    Disable(); // avoid race condition with interrupt
    nextetd = eqh->eqh_FirstTD;
    while((etd = nextetd))
    {
        KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
        nextetd = etd->etd_Succ;
        ehciFreeTD(hc, etd);
    }
    ehciFreeQH(hc, eqh);
    Enable();
}
void ehciFreeQHandTDs(struct PCIController *hc, struct EhciQH *eqh) {

    struct EhciTD *etd = NULL;
    struct EhciTD *nextetd;

    KPRINTF(5, ("Unlinking QContext 0x%p\n", eqh));
    nextetd = eqh->eqh_FirstTD;
    while(nextetd)
    {
        KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
        etd = nextetd;
        nextetd = (struct EhciTD *) etd->etd_Succ;
        ehciFreeTD(hc, etd);
    }

    ehciFreeQH(hc, eqh);
}
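
/*
 * The driver keeps 11 interrupt queue heads (hc_EhciIntQH[0..10]), one per
 * power-of-two polling interval. ehciUpdateIntTree() re-links their hardware
 * horizontal pointers so that each level skips directly to the next level
 * that actually has transfers queued, ending at the terminating QH.
 */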
void ehciUpdateIntTree(struct PCIController *hc) {

    struct EhciQH *eqh;
    struct EhciQH *predeqh;
    struct EhciQH *lastusedeqh;
    UWORD cnt;

    // optimize linkage between queue heads
    predeqh = lastusedeqh = hc->hc_EhciTermQH;
    for(cnt = 0; cnt < 11; cnt++)
    {
        eqh = hc->hc_EhciIntQH[cnt];
        if(eqh->eqh_Succ != predeqh)
        {
            lastusedeqh = eqh->eqh_Succ;
        }
        eqh->eqh_NextQH = lastusedeqh->eqh_Self;
        CacheClearE(&eqh->eqh_NextQH, 32, CACRF_ClearD);
        predeqh = eqh;
    }
}
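
/*
 * Completion handling: for every request on the async and periodic queues the
 * queue head overlay (eqh_CtrlStatus/eqh_NextTD) is inspected first; a QH is
 * considered finished when it is halted, or no longer active with a
 * terminated next pointer. The individual TDs are then walked to accumulate
 * the actual transfer length and to map EHCI error bits (babble, transaction
 * error, data buffer error, missed CSPLIT) onto UHIOERR_#? codes.
 */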
void ehciHandleFinishedTDs(struct PCIController *hc) {

    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct IOUsbHWReq *nextioreq;
    struct EhciTD *predetd;
    BOOL updatetree = FALSE;

    KPRINTF(1, ("Checking for Async work done...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_TDQueue.lh_Head;
    while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
    {
        eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
        if(eqh)
        {
            KPRINTF(1, ("Examining IOReq=0x%p with EQH=0x%p\n", ioreq, eqh));
            CacheClearE(&eqh->eqh_NextQH, 32, CACRF_InvalidateD);
            epctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
            nexttd = READMEM32_LE(&eqh->eqh_NextTD);
            devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
            halted = ((epctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
            if(halted || (!(epctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
            {
                KPRINTF(1, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
                shortpkt = FALSE;
                actual = 0;
                etd = eqh->eqh_FirstTD;
                do
                {
                    ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
                    KPRINTF(1, ("AS: CS=%08lx SL=%08lx TD=0x%p\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
                    if(ctrlstatus & ETCF_ACTIVE)
                    {
                        if(halted)
                        {
                            KPRINTF(20, ("Async: Halted before TD\n"));
                            //ctrlstatus = eqh->eqh_CtrlStatus;
                            if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
                            {
                                KPRINTF(20, ("NAK timeout\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
                            }
                            break;
                        } else {
                            // what happened here? The host controller was just updating the fields and has not finished yet
                            ctrlstatus = epctrlstatus;
                            /*KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", epctrlstatus, READMEM32_LE(&eqh->eqh_CurrTD), nexttd));
                            KPRINTF(20, ("AS: CS=%08lx CP=%08lx NX=%08lx\n", READMEM32_LE(&eqh->eqh_CtrlStatus), READMEM32_LE(&eqh->eqh_CurrTD), READMEM32_LE(&eqh->eqh_NextTD)));
                            KPRINTF(20, ("AS: CS=%08lx SL=%08lx TD=%08lx\n", ctrlstatus, READMEM32_LE(&etd->etd_Self), etd));
                            etd = eqh->eqh_FirstTD;
                            KPRINTF(20, ("XX: CS=%08lx SL=%08lx TD=%08lx\n", READMEM32_LE(&etd->etd_CtrlStatus), READMEM32_LE(&etd->etd_Self), etd));
                            } while(etd = etd->etd_Succ);
                            KPRINTF(20, ("Async: Internal error! Still active?!\n"));*/
                        }
                    }

                    if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR))
                    {
                        if(ctrlstatus & ETSF_BABBLE)
                        {
                            KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
                            ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
                        }
                        else if(ctrlstatus & ETSF_DATABUFFERERR)
                        {
                            KPRINTF(20, ("Databuffer error\n"));
                            ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
                        }
                        else if(ctrlstatus & ETSF_TRANSERR)
                        {
                            if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
                            {
                                KPRINTF(20, ("other kind of STALLED!\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_STALL;
                            } else {
                                KPRINTF(20, ("TIMEOUT!\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
                            }
                        } else {
                            KPRINTF(20, ("STALLED!\n"));
                            ioreq->iouh_Req.io_Error = UHIOERR_STALL;
                        }
                        break;
                    }

                    len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
                    if((ctrlstatus & ETCM_PIDCODE) != ETCF_PIDCODE_SETUP) // don't count setup packet
                    {
                        actual += len;
                    }
                    if(ctrlstatus & ETSM_TRANSLENGTH)
                    {
                        KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
                        shortpkt = TRUE;
                        break;
                    }
                    etd = etd->etd_Succ;
                } while(etd && (!(ctrlstatus & ETCF_READYINTEN)));

                if(((actual + ioreq->iouh_Actual) < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
                {
                    ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
                }
                ioreq->iouh_Actual += actual;
                if(inspect && (!shortpkt) && (eqh->eqh_Actual < ioreq->iouh_Length))
                {
                    KPRINTF(10, ("Reloading BULK at %ld/%ld\n", eqh->eqh_Actual, ioreq->iouh_Length));
                    ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
                    phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer + ioreq->iouh_Actual);
                    predetd = etd = eqh->eqh_FirstTD;

                    CONSTWRITEMEM32_LE(&eqh->eqh_CurrTD, EHCI_TERMINATE);
                    CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
                    CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
                    do
                    {
                        len = ioreq->iouh_Length - eqh->eqh_Actual;
                        if(len > 4*EHCI_PAGE_SIZE)
                        {
                            len = 4*EHCI_PAGE_SIZE;
                        }
                        etd->etd_Length = len;
                        KPRINTF(1, ("Reload Bulk TD 0x%p len %ld (%ld/%ld) phy=0x%p\n",
                                    etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
                        WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
                        // FIXME need quark scatter gather mechanism here
                        WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
                        WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
                        WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
                        WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
                        WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));

                        // FIXME Make use of these on 64-bit-capable hardware
                        etd->etd_ExtBufferPtr[0] = 0;
                        etd->etd_ExtBufferPtr[1] = 0;
                        etd->etd_ExtBufferPtr[2] = 0;
                        etd->etd_ExtBufferPtr[3] = 0;
                        etd->etd_ExtBufferPtr[4] = 0;

                        phyaddr += len;
                        eqh->eqh_Actual += len;
                        zeroterm = (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0));
                        predetd = etd;
                        etd = etd->etd_Succ;
                        if((!etd) && zeroterm)
                        {
                            // rare case where the zero packet would be lost, allocate etd and append zero packet.
                            etd = ehciAllocTD(hc);
                            if(!etd)
                            {
                                KPRINTF(200, ("INTERNAL ERROR! This should not happen! Could not allocate zero packet TD\n"));
                                break;
                            }
                            predetd->etd_Succ = etd;
                            predetd->etd_NextTD = etd->etd_Self;
                            predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
                            etd->etd_Succ = NULL;
                            CONSTWRITEMEM32_LE(&etd->etd_NextTD, EHCI_TERMINATE);
                            CONSTWRITEMEM32_LE(&etd->etd_AltNextTD, EHCI_TERMINATE);
                        }
                    } while(etd && ((eqh->eqh_Actual < ioreq->iouh_Length) || zeroterm));
                    ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
                    WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);
                    CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
                    CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);

                    etd = eqh->eqh_FirstTD;
                    eqh->eqh_NextTD = etd->etd_Self;

                    unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;
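
                    /*
                     * hu_NakTimeoutFrame[] holds the frame-counter value after
                     * which a NAKing endpoint is given up on: iouh_NakTimeout is
                     * apparently in milliseconds, while hc_FrameCounter follows
                     * the EHCI FRINDEX register, which advances once per µframe,
                     * hence the <<3 (8 µframes per millisecond). A value of 0
                     * disables the timeout check in the code above.
                     */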
                } else {
                    ehciFreeAsyncContext(hc, ioreq);
                    // use next data toggle bit based on last successful transaction
                    KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
                    unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
                    KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));

                    if(ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
                    {
                        // check for successful clear feature and set address ctrl transfers
                        uhwCheckSpecialCtrlTransfers(hc, ioreq);
                    }
                    ReplyMsg(&ioreq->iouh_Req.io_Message);
                }
            }
        } else {
            KPRINTF(20, ("IOReq=0x%p has no UQH!\n", ioreq));
        }
        ioreq = nextioreq;
    }

    KPRINTF(1, ("Checking for Periodic work done...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_PeriodicTDQueue.lh_Head;
    while((nextioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ))
    {
        eqh = (struct EhciQH *) ioreq->iouh_DriverPrivate1;
        if(eqh)
        {
            KPRINTF(1, ("Examining IOReq=0x%p with EQH=0x%p\n", ioreq, eqh));
            nexttd = READMEM32_LE(&eqh->eqh_NextTD);
            etd = eqh->eqh_FirstTD;
            ctrlstatus = READMEM32_LE(&eqh->eqh_CtrlStatus);
            devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
            halted = ((ctrlstatus & (ETCF_ACTIVE|ETSF_HALTED)) == ETSF_HALTED);
            if(halted || (!(ctrlstatus & ETCF_ACTIVE) && (nexttd & EHCI_TERMINATE)))
            {
                KPRINTF(1, ("EQH not active %08lx\n", ctrlstatus));
                actual = 0;
                do
                {
                    ctrlstatus = READMEM32_LE(&etd->etd_CtrlStatus);
                    KPRINTF(1, ("Periodic: TD=0x%p CS=%08lx\n", etd, ctrlstatus));
                    if(ctrlstatus & ETCF_ACTIVE)
                    {
                        if(halted)
                        {
                            KPRINTF(20, ("Periodic: Halted before TD\n"));
                            //ctrlstatus = eqh->eqh_CtrlStatus;
                            if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
                            {
                                KPRINTF(20, ("NAK timeout\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
                            }
                            break;
                        } else {
                            KPRINTF(20, ("Periodic: Internal error! Still active?!\n"));
                            break;
                        }
                    }

                    if(ctrlstatus & (ETSF_HALTED|ETSF_TRANSERR|ETSF_BABBLE|ETSF_DATABUFFERERR|ETSF_MISSEDCSPLIT))
                    {
                        if(ctrlstatus & ETSF_BABBLE)
                        {
                            KPRINTF(20, ("Babble error %08lx\n", ctrlstatus));
                            ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
                        }
                        else if(ctrlstatus & ETSF_MISSEDCSPLIT)
                        {
                            KPRINTF(20, ("Missed CSplit %08lx\n", ctrlstatus));
                            ioreq->iouh_Req.io_Error = UHIOERR_STALL;
                        }
                        else if(ctrlstatus & ETSF_DATABUFFERERR)
                        {
                            KPRINTF(20, ("Databuffer error\n"));
                            ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
                        }
                        else if(ctrlstatus & ETSF_TRANSERR)
                        {
                            if((ctrlstatus & ETCM_ERRORLIMIT)>>ETCS_ERRORLIMIT)
                            {
                                KPRINTF(20, ("STALLED!\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_STALL;
                            } else {
                                KPRINTF(20, ("TIMEOUT!\n"));
                                ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
                            }
                        }
                        else if(unit->hu_NakTimeoutFrame[devadrep] && (hc->hc_FrameCounter > unit->hu_NakTimeoutFrame[devadrep]))
                        {
                            ioreq->iouh_Req.io_Error = UHIOERR_NAKTIMEOUT;
                        }
                        break;
                    }

                    len = etd->etd_Length - ((ctrlstatus & ETSM_TRANSLENGTH)>>ETSS_TRANSLENGTH);
                    actual += len;
                    if(ctrlstatus & ETSM_TRANSLENGTH)
                    {
                        KPRINTF(10, ("Short packet: %ld < %ld\n", len, etd->etd_Length));
                        break;
                    }
                    etd = etd->etd_Succ;
                } while(etd);

                if((actual < eqh->eqh_Actual) && (!ioreq->iouh_Req.io_Error) && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
                {
                    ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
                }
                ioreq->iouh_Actual += actual;
                ehciFreePeriodicContext(hc, ioreq);
                updatetree = TRUE;
                // use next data toggle bit based on last successful transaction
                KPRINTF(1, ("Old Toggle %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
                unit->hu_DevDataToggle[devadrep] = (ctrlstatus & ETCF_DATA1) ? TRUE : FALSE;
                KPRINTF(1, ("Toggle now %04lx:%ld\n", devadrep, unit->hu_DevDataToggle[devadrep]));
                ReplyMsg(&ioreq->iouh_Req.io_Message);
            }
        } else {
            KPRINTF(20, ("IOReq=0x%p has no UQH!\n", ioreq));
        }
        ioreq = nextioreq;
    }
    if(updatetree)
    {
        ehciUpdateIntTree(hc);
    }
}
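
/*
 * Control transfers are built from three stages: a SETUP TD carrying the
 * 8-byte setup packet, optional DATA TDs (toggling DATA0/DATA1 per TD), and a
 * terminating status TD sent in the opposite direction of the data stage with
 * DATA1 set. The whole chain hangs off a new queue head that is inserted
 * right behind hc_EhciAsyncQH in the asynchronous schedule.
 */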
void ehciScheduleCtrlTDs(struct PCIController *hc) {

    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct EhciTD *setupetd;
    struct EhciTD *dataetd;
    struct EhciTD *termetd;
    struct EhciTD *predetd;

    /* *** CTRL Transfers *** */
    KPRINTF(1, ("Scheduling new CTRL transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
    {
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint;
        KPRINTF(10, ("New CTRL transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
            continue;
        }

        eqh = ehciAllocQH(hc);
        if(!eqh)
        {
            break;
        }

        setupetd = ehciAllocTD(hc);
        if(!setupetd)
        {
            ehciFreeQH(hc, eqh);
            break;
        }
        termetd = ehciAllocTD(hc);
        if(!termetd)
        {
            ehciFreeTD(hc, setupetd);
            ehciFreeQH(hc, eqh);
            break;
        }
        eqh->eqh_IOReq = ioreq;
        eqh->eqh_FirstTD = setupetd;

        epcaps = ((0<<EQES_RELOAD)|EQEF_TOGGLEFROMTD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
        if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
        {
            KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
            // full speed and low speed handling
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
            epcaps |= EQEF_SPLITCTRLEP;
            if(ioreq->iouh_Flags & UHFF_LOWSPEED)
            {
                KPRINTF(10, ("*** LOW SPEED ***\n"));
                epcaps |= EQEF_LOWSPEED;
            }
        } else {
            CONSTWRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1);
            epcaps |= EQEF_HIGHSPEED;
        }
        WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
        //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
        //eqh->eqh_AltNextTD = eqh->eqh_NextTD = setupetd->etd_Self;

        //termetd->etd_QueueHead = setupetd->etd_QueueHead = eqh;

        KPRINTF(1, ("SetupTD=0x%p, TermTD=0x%p\n", setupetd, termetd));

        setupetd->etd_Length = 8;
        CONSTWRITEMEM32_LE(&setupetd->etd_CtrlStatus, (8<<ETSS_TRANSLENGTH)|ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_SETUP);

        eqh->eqh_SetupBuf = usbGetBuffer(&ioreq->iouh_SetupData, 8, UHDIR_OUT);
        phyaddr = (IPTR) pciGetPhysical(hc, eqh->eqh_SetupBuf);

        WRITEMEM32_LE(&setupetd->etd_BufferPtr[0], phyaddr);
        WRITEMEM32_LE(&setupetd->etd_BufferPtr[1], (phyaddr + 8) & EHCI_PAGE_MASK); // theoretically, setup data may cross one page
        setupetd->etd_BufferPtr[2] = 0; // clear for overlay bits

        // FIXME Make use of these on 64-bit-capable hardware
        setupetd->etd_ExtBufferPtr[0] = 0;
        setupetd->etd_ExtBufferPtr[1] = 0;
        setupetd->etd_ExtBufferPtr[2] = 0;

        ctrlstatus = (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
        predetd = setupetd;
        if(ioreq->iouh_Length)
        {
            eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, (ioreq->iouh_SetupData.bmRequestType & URTF_IN) ? UHDIR_IN : UHDIR_OUT);
            phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer);
            do
            {
                dataetd = ehciAllocTD(hc);
                if(!dataetd)
                {
                    break;
                }
                ctrlstatus ^= ETCF_DATA1; // toggle bit
                predetd->etd_Succ = dataetd;
                predetd->etd_NextTD = dataetd->etd_Self;
                dataetd->etd_AltNextTD = termetd->etd_Self;

                len = ioreq->iouh_Length - eqh->eqh_Actual;
                if(len > 4*EHCI_PAGE_SIZE)
                {
                    len = 4*EHCI_PAGE_SIZE;
                }
                dataetd->etd_Length = len;
                WRITEMEM32_LE(&dataetd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
                // FIXME need quark scatter gather mechanism here
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[0], phyaddr);
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
                WRITEMEM32_LE(&dataetd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));

                // FIXME Make use of these on 64-bit-capable hardware
                dataetd->etd_ExtBufferPtr[0] = 0;
                dataetd->etd_ExtBufferPtr[1] = 0;
                dataetd->etd_ExtBufferPtr[2] = 0;
                dataetd->etd_ExtBufferPtr[3] = 0;
                dataetd->etd_ExtBufferPtr[4] = 0;

                phyaddr += len;
                eqh->eqh_Actual += len;
                predetd = dataetd;
            } while(eqh->eqh_Actual < ioreq->iouh_Length);
            if(!dataetd)
            {
                // not enough dataetds? try again later
                usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
                usbReleaseBuffer(eqh->eqh_SetupBuf, &ioreq->iouh_SetupData, 0, 0);
                ehciFreeQHandTDs(hc, eqh);
                ehciFreeTD(hc, termetd); // this one's not linked yet
                break;
            }
        }
        ctrlstatus |= ETCF_DATA1|ETCF_READYINTEN;
        ctrlstatus ^= (ETCF_PIDCODE_IN^ETCF_PIDCODE_OUT);

        predetd->etd_NextTD = termetd->etd_Self;
        predetd->etd_Succ = termetd;
        CONSTWRITEMEM32_LE(&termetd->etd_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&termetd->etd_AltNextTD, EHCI_TERMINATE);
        WRITEMEM32_LE(&termetd->etd_CtrlStatus, ctrlstatus);
        termetd->etd_Length = 0;
        termetd->etd_BufferPtr[0] = 0; // clear for overlay bits
        termetd->etd_BufferPtr[1] = 0; // clear for overlay bits
        termetd->etd_BufferPtr[2] = 0; // clear for overlay bits
        termetd->etd_ExtBufferPtr[0] = 0; // clear for overlay bits
        termetd->etd_ExtBufferPtr[1] = 0; // clear for overlay bits
        termetd->etd_ExtBufferPtr[2] = 0; // clear for overlay bits
        termetd->etd_Succ = NULL;

        // due to silicon bugs, we fill in the first overlay ourselves.
        eqh->eqh_CurrTD = setupetd->etd_Self;
        eqh->eqh_NextTD = setupetd->etd_NextTD;
        eqh->eqh_AltNextTD = setupetd->etd_AltNextTD;
        eqh->eqh_CtrlStatus = setupetd->etd_CtrlStatus;
        eqh->eqh_BufferPtr[0] = setupetd->etd_BufferPtr[0];
        eqh->eqh_BufferPtr[1] = setupetd->etd_BufferPtr[1];
        eqh->eqh_BufferPtr[2] = 0;
        eqh->eqh_ExtBufferPtr[0] = setupetd->etd_ExtBufferPtr[0];
        eqh->eqh_ExtBufferPtr[1] = setupetd->etd_ExtBufferPtr[1];
        eqh->eqh_ExtBufferPtr[2] = 0;

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = eqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;

        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry (just behind the asyncQH)
        eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
        eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;

        eqh->eqh_Pred = hc->hc_EhciAsyncQH;
        eqh->eqh_Succ->eqh_Pred = eqh;
        hc->hc_EhciAsyncQH->eqh_Succ = eqh;
        hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;

        ioreq = (struct IOUsbHWReq *) hc->hc_CtrlXFerQueue.lh_Head;
    }
}
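
/*
 * Interrupt transfers are attached to one of the hc_EhciIntQH[] levels
 * according to iouh_Interval: the level index roughly corresponds to log2 of
 * the interval (µframes for high speed endpoints, frames for split
 * transactions), and the µframe start/complete-split masks written into
 * eqh_SplitCtrl spread the polling across the eight µframes of a frame.
 */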
void ehciScheduleIntTDs(struct PCIController *hc) {

    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct EhciQH *inteqh;
    struct EhciTD *predetd;

    /* *** INT Transfers *** */
    KPRINTF(1, ("Scheduling new INT transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
    {
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        KPRINTF(10, ("New INT transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
            continue;
        }

        eqh = ehciAllocQH(hc);
        if(!eqh)
        {
            break;
        }

        eqh->eqh_IOReq = ioreq;

        epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
        if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
        {
            KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
            // full speed and low speed handling
            if(ioreq->iouh_Flags & UHFF_LOWSPEED)
            {
                KPRINTF(10, ("*** LOW SPEED ***\n"));
                epcaps |= EQEF_LOWSPEED;
            }
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, (EQSF_MULTI_1|(0x01<<EQSS_MUSOFACTIVE)|(0x1c<<EQSS_MUSOFCSPLIT))|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
            if(ioreq->iouh_Interval >= 255)
            {
                inteqh = hc->hc_EhciIntQH[8]; // 256ms interval
            } else {
                cnt = 0;
                do
                {
                    inteqh = hc->hc_EhciIntQH[cnt++];
                } while(ioreq->iouh_Interval >= (1<<cnt));
            }
        } else {
            epcaps |= EQEF_HIGHSPEED;
            if(ioreq->iouh_Flags & UHFF_MULTI_3)
            {
                splitctrl = EQSF_MULTI_3;
            }
            else if(ioreq->iouh_Flags & UHFF_MULTI_2)
            {
                splitctrl = EQSF_MULTI_2;
            } else {
                splitctrl = EQSF_MULTI_1;
            }
            if(ioreq->iouh_Interval < 2) // 0-1 µFrames
            {
                splitctrl |= (0xff<<EQSS_MUSOFACTIVE);
            }
            else if(ioreq->iouh_Interval < 4) // 2-3 µFrames
            {
                splitctrl |= (0x55<<EQSS_MUSOFACTIVE);
            }
            else if(ioreq->iouh_Interval < 8) // 4-7 µFrames
            {
                splitctrl |= (0x22<<EQSS_MUSOFACTIVE);
            }
            else if(ioreq->iouh_Interval > 511) // 64ms and higher
            {
                splitctrl |= (0x10<<EQSS_MUSOFACTIVE);
            }
            else //if(ioreq->iouh_Interval >= 8) // 1-64ms
            {
                splitctrl |= (0x01<<EQSS_MUSOFACTIVE);
            }
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
            if(ioreq->iouh_Interval >= 1024)
            {
                inteqh = hc->hc_EhciIntQH[10]; // 1024 µFrames interval
            } else {
                cnt = 0;
                do
                {
                    inteqh = hc->hc_EhciIntQH[cnt++];
                } while(ioreq->iouh_Interval >= (1<<cnt));
            }
        }
        WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
        //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
        eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()

        ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
        if(unit->hu_DevDataToggle[devadrep])
        {
            // continue with data toggle 1
            ctrlstatus |= ETCF_DATA1;
        }

        eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, ioreq->iouh_Dir);
        phyaddr = (IPTR) pciGetPhysical(hc, eqh->eqh_Buffer);
        predetd = NULL;
        do
        {
            etd = ehciAllocTD(hc);
            if(!etd)
            {
                break;
            }
            if(predetd)
            {
                predetd->etd_Succ = etd;
                predetd->etd_NextTD = etd->etd_Self;
                predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
            } else {
                eqh->eqh_FirstTD = etd;
                //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
            }

            len = ioreq->iouh_Length - eqh->eqh_Actual;
            if(len > 4*EHCI_PAGE_SIZE)
            {
                len = 4*EHCI_PAGE_SIZE;
            }
            etd->etd_Length = len;
            WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
            // FIXME need quark scatter gather mechanism here
            WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
            WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));

            // FIXME Use these on 64-bit-capable hardware
            etd->etd_ExtBufferPtr[0] = 0;
            etd->etd_ExtBufferPtr[1] = 0;
            etd->etd_ExtBufferPtr[2] = 0;
            etd->etd_ExtBufferPtr[3] = 0;
            etd->etd_ExtBufferPtr[4] = 0;

            predetd = etd;
            phyaddr += len;
            eqh->eqh_Actual += len;

        } while(eqh->eqh_Actual < ioreq->iouh_Length);

        if(!etd)
        {
            // not enough etds? try again later
            usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
            ehciFreeQHandTDs(hc, eqh);
            break;
        }
        ctrlstatus |= ETCF_READYINTEN|(etd->etd_Length<<ETSS_TRANSLENGTH);
        WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);

        CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);
        predetd->etd_Succ = NULL;

        // due to silicon bugs, we fill in the first overlay ourselves.
        etd = eqh->eqh_FirstTD;
        eqh->eqh_CurrTD = etd->etd_Self;
        eqh->eqh_NextTD = etd->etd_NextTD;
        eqh->eqh_AltNextTD = etd->etd_AltNextTD;
        eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
        eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
        eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
        eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
        eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
        eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
        eqh->eqh_ExtBufferPtr[0] = etd->etd_ExtBufferPtr[0];
        eqh->eqh_ExtBufferPtr[1] = etd->etd_ExtBufferPtr[1];
        eqh->eqh_ExtBufferPtr[2] = etd->etd_ExtBufferPtr[2];
        eqh->eqh_ExtBufferPtr[3] = etd->etd_ExtBufferPtr[3];
        eqh->eqh_ExtBufferPtr[4] = etd->etd_ExtBufferPtr[4];

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = eqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;

        AddTail(&hc->hc_PeriodicTDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry in the right IntQH
        eqh->eqh_Succ = inteqh->eqh_Succ;
        eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;

        eqh->eqh_Pred = inteqh;
        eqh->eqh_Succ->eqh_Pred = eqh;
        inteqh->eqh_Succ = eqh;
        inteqh->eqh_NextQH = eqh->eqh_Self;

        ehciUpdateIntTree(hc);

        ioreq = (struct IOUsbHWReq *) hc->hc_IntXFerQueue.lh_Head;
    }
}
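
/*
 * Bulk transfers queue up to EHCI_TD_BULK_LIMIT bytes of TDs at a time;
 * larger requests are split and the TD chain is re-filled from the completion
 * handler (see the "Reloading BULK" path in ehciHandleFinishedTDs). Like
 * control transfers, the queue head is linked into the asynchronous schedule
 * directly behind hc_EhciAsyncQH.
 */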
void ehciScheduleBulkTDs(struct PCIController *hc) {

    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    struct EhciTD *etd = NULL;
    struct EhciTD *predetd;

    /* *** BULK Transfers *** */
    KPRINTF(1, ("Scheduling new BULK transfers...\n"));
    ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
    while(((struct Node *) ioreq)->ln_Succ)
    {
        devadrep = (ioreq->iouh_DevAddr<<5) + ioreq->iouh_Endpoint + ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        KPRINTF(10, ("New BULK transfer to %ld.%ld: %ld bytes\n", ioreq->iouh_DevAddr, ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next transaction */
        if(unit->hu_DevBusyReq[devadrep])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", devadrep));
            ioreq = (struct IOUsbHWReq *) ((struct Node *) ioreq)->ln_Succ;
            continue;
        }

        eqh = ehciAllocQH(hc);
        if(!eqh)
        {
            break;
        }

        eqh->eqh_IOReq = ioreq;

        epcaps = (0<<EQES_RELOAD)|(ioreq->iouh_MaxPktSize<<EQES_MAXPKTLEN)|(ioreq->iouh_DevAddr<<EQES_DEVADDR)|(ioreq->iouh_Endpoint<<EQES_ENDPOINT);
        if(ioreq->iouh_Flags & UHFF_SPLITTRANS)
        {
            KPRINTF(10, ("*** SPLIT TRANSACTION to HubPort %ld at Addr %ld\n", ioreq->iouh_SplitHubPort, ioreq->iouh_SplitHubAddr));
            // full speed and low speed handling
            if(ioreq->iouh_Flags & UHFF_LOWSPEED)
            {
                KPRINTF(10, ("*** LOW SPEED ***\n"));
                epcaps |= EQEF_LOWSPEED;
            }
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, EQSF_MULTI_1|(ioreq->iouh_SplitHubPort<<EQSS_PORTNUMBER)|(ioreq->iouh_SplitHubAddr<<EQSS_HUBADDRESS));
        } else {
            epcaps |= EQEF_HIGHSPEED;
            if(ioreq->iouh_Flags & UHFF_MULTI_3)
            {
                splitctrl = EQSF_MULTI_3;
            }
            else if(ioreq->iouh_Flags & UHFF_MULTI_2)
            {
                splitctrl = EQSF_MULTI_2;
            } else {
                splitctrl = EQSF_MULTI_1;
            }
            WRITEMEM32_LE(&eqh->eqh_SplitCtrl, splitctrl);
        }
        WRITEMEM32_LE(&eqh->eqh_EPCaps, epcaps);
        //eqh->eqh_CtrlStatus = eqh->eqh_CurrTD = 0;
        eqh->eqh_FirstTD = NULL; // clear for ehciFreeQHandTDs()

        ctrlstatus = (ioreq->iouh_Dir == UHDIR_IN) ? (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_IN) : (ETCF_3ERRORSLIMIT|ETCF_ACTIVE|ETCF_PIDCODE_OUT);
        if(unit->hu_DevDataToggle[devadrep])
        {
            // continue with data toggle 1
            ctrlstatus |= ETCF_DATA1;
        }

        eqh->eqh_Buffer = usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, ioreq->iouh_Dir);
        phyaddr = (IPTR)pciGetPhysical(hc, eqh->eqh_Buffer);
        predetd = NULL;
        do
        {
            if((eqh->eqh_Actual >= EHCI_TD_BULK_LIMIT) && (eqh->eqh_Actual < ioreq->iouh_Length))
            {
                KPRINTF(10, ("Bulk too large, splitting...\n"));
                break;
            }
            etd = ehciAllocTD(hc);
            if(!etd)
            {
                break;
            }
            if(predetd)
            {
                predetd->etd_Succ = etd;
                predetd->etd_NextTD = etd->etd_Self;
                predetd->etd_AltNextTD = hc->hc_ShortPktEndTD->etd_Self;
            } else {
                eqh->eqh_FirstTD = etd;
                //eqh->eqh_AltNextTD = eqh->eqh_NextTD = etd->etd_Self;
            }

            len = ioreq->iouh_Length - eqh->eqh_Actual;
            if(len > 4*EHCI_PAGE_SIZE)
            {
                len = 4*EHCI_PAGE_SIZE;
            }
            etd->etd_Length = len;
            KPRINTF(1, ("Bulk TD 0x%p len %ld (%ld/%ld) phy=0x%p\n",
                        etd, len, eqh->eqh_Actual, ioreq->iouh_Length, phyaddr));
            WRITEMEM32_LE(&etd->etd_CtrlStatus, ctrlstatus|(len<<ETSS_TRANSLENGTH));
            // FIXME need quark scatter gather mechanism here
            WRITEMEM32_LE(&etd->etd_BufferPtr[0], phyaddr);
            WRITEMEM32_LE(&etd->etd_BufferPtr[1], (phyaddr & EHCI_PAGE_MASK) + (1*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[2], (phyaddr & EHCI_PAGE_MASK) + (2*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[3], (phyaddr & EHCI_PAGE_MASK) + (3*EHCI_PAGE_SIZE));
            WRITEMEM32_LE(&etd->etd_BufferPtr[4], (phyaddr & EHCI_PAGE_MASK) + (4*EHCI_PAGE_SIZE));

            // FIXME Use these on 64-bit-capable hardware
            etd->etd_ExtBufferPtr[0] = 0;
            etd->etd_ExtBufferPtr[1] = 0;
            etd->etd_ExtBufferPtr[2] = 0;
            etd->etd_ExtBufferPtr[3] = 0;
            etd->etd_ExtBufferPtr[4] = 0;

            predetd = etd;
            phyaddr += len;
            eqh->eqh_Actual += len;

        } while((eqh->eqh_Actual < ioreq->iouh_Length) || (len && (ioreq->iouh_Dir == UHDIR_OUT) && (eqh->eqh_Actual == ioreq->iouh_Length) && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT)) && ((eqh->eqh_Actual % ioreq->iouh_MaxPktSize) == 0)));

        if(!etd)
        {
            // not enough etds? try again later
            usbReleaseBuffer(eqh->eqh_Buffer, ioreq->iouh_Data, 0, 0);
            ehciFreeQHandTDs(hc, eqh);
            break;
        }
        ctrlstatus |= ETCF_READYINTEN|(predetd->etd_Length<<ETSS_TRANSLENGTH);
        WRITEMEM32_LE(&predetd->etd_CtrlStatus, ctrlstatus);

        predetd->etd_Succ = NULL;
        CONSTWRITEMEM32_LE(&predetd->etd_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&predetd->etd_AltNextTD, EHCI_TERMINATE);

        // due to silicon bugs, we fill in the first overlay ourselves.
        etd = eqh->eqh_FirstTD;
        eqh->eqh_CurrTD = etd->etd_Self;
        eqh->eqh_NextTD = etd->etd_NextTD;
        eqh->eqh_AltNextTD = etd->etd_AltNextTD;
        eqh->eqh_CtrlStatus = etd->etd_CtrlStatus;
        eqh->eqh_BufferPtr[0] = etd->etd_BufferPtr[0];
        eqh->eqh_BufferPtr[1] = etd->etd_BufferPtr[1];
        eqh->eqh_BufferPtr[2] = etd->etd_BufferPtr[2];
        eqh->eqh_BufferPtr[3] = etd->etd_BufferPtr[3];
        eqh->eqh_BufferPtr[4] = etd->etd_BufferPtr[4];
        eqh->eqh_ExtBufferPtr[0] = etd->etd_ExtBufferPtr[0];
        eqh->eqh_ExtBufferPtr[1] = etd->etd_ExtBufferPtr[1];
        eqh->eqh_ExtBufferPtr[2] = etd->etd_ExtBufferPtr[2];
        eqh->eqh_ExtBufferPtr[3] = etd->etd_ExtBufferPtr[3];
        eqh->eqh_ExtBufferPtr[4] = etd->etd_ExtBufferPtr[4];

        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = eqh;

        // manage endpoint going busy
        unit->hu_DevBusyReq[devadrep] = ioreq;
        unit->hu_NakTimeoutFrame[devadrep] = (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter + (ioreq->iouh_NakTimeout<<3) : 0;

        AddTail(&hc->hc_TDQueue, (struct Node *) ioreq);

        // looks good to me, now enqueue this entry (just behind the asyncQH)
        eqh->eqh_Succ = hc->hc_EhciAsyncQH->eqh_Succ;
        eqh->eqh_NextQH = eqh->eqh_Succ->eqh_Self;

        eqh->eqh_Pred = hc->hc_EhciAsyncQH;
        eqh->eqh_Succ->eqh_Pred = eqh;
        hc->hc_EhciAsyncQH->eqh_Succ = eqh;
        hc->hc_EhciAsyncQH->eqh_NextQH = eqh->eqh_Self;

        ioreq = (struct IOUsbHWReq *) hc->hc_BulkXFerQueue.lh_Head;
    }
}
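
/*
 * The hardware frame index register (EHCI_FRAMECOUNT/FRINDEX) is only 14 bits
 * wide and advances once per µframe. ehciUpdateFrameCounter() merges it into
 * the software hc_FrameCounter, and the frame-count-over interrupt in
 * ehciIntCode() extends it to a full 32-bit value across rollovers.
 */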
void ehciUpdateFrameCounter(struct PCIController *hc) {

    hc->hc_FrameCounter = (hc->hc_FrameCounter & 0xffffc000)|(READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT) & 0x3fff);
}
static AROS_INTH1(ehciCompleteInt, struct PCIController *, hc)
{
    KPRINTF(1, ("CompleteInt!\n"));
    ehciUpdateFrameCounter(hc);

    /* **************** PROCESS DONE TRANSFERS **************** */

    if(hc->hc_AsyncAdvanced)
    {
        struct EhciQH *eqh;
        struct EhciTD *etd;
        struct EhciTD *nextetd;

        hc->hc_AsyncAdvanced = FALSE;

        KPRINTF(1, ("AsyncAdvance 0x%p\n", hc->hc_EhciAsyncFreeQH));

        while((eqh = hc->hc_EhciAsyncFreeQH))
        {
            KPRINTF(1, ("FreeQH 0x%p\n", eqh));
            nextetd = eqh->eqh_FirstTD;
            while((etd = nextetd))
            {
                KPRINTF(1, ("FreeTD 0x%p\n", nextetd));
                nextetd = etd->etd_Succ;
                ehciFreeTD(hc, etd);
            }
            hc->hc_EhciAsyncFreeQH = eqh->eqh_Succ;
            ehciFreeQH(hc, eqh);
        }
    }

    ehciHandleFinishedTDs(hc);

    if(hc->hc_CtrlXFerQueue.lh_Head->ln_Succ)
    {
        ehciScheduleCtrlTDs(hc);
    }

    if(hc->hc_IntXFerQueue.lh_Head->ln_Succ)
    {
        ehciScheduleIntTDs(hc);
    }

    if(hc->hc_BulkXFerQueue.lh_Head->ln_Succ)
    {
        ehciScheduleBulkTDs(hc);
    }

    KPRINTF(1, ("CompleteDone\n"));
}
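
/*
 * ehciIntCode() is the hardware interrupt: it acknowledges USBSTS, tracks
 * frame counter rollovers, notes async-advance doorbell completion and port
 * changes, and then defers the actual completion work to ehciCompleteInt()
 * via SureCause(), so that TD processing and (re)scheduling run outside the
 * low-level interrupt.
 */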
static AROS_INTH1(ehciIntCode, struct PCIController *, hc)
{
    struct PCIDevice *base = hc->hc_Device;
    struct PCIUnit *unit = hc->hc_Unit;
    ULONG intr;

    //KPRINTF(1, ("pciEhciInt()\n"));
    intr = READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS);
    if(intr & hc->hc_PCIIntEnMask)
    {
        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS, intr);
        //KPRINTF(1, ("INT=%04lx\n", intr));
        if (!(hc->hc_Flags & HCF_ONLINE))
        {
            return FALSE;
        }
        if(intr & EHSF_FRAMECOUNTOVER)
        {
            hc->hc_FrameCounter |= 0x3fff;
            hc->hc_FrameCounter++;
            hc->hc_FrameCounter |= READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT) & 0x3fff;
            KPRINTF(5, ("Frame Counter Rollover %ld\n", hc->hc_FrameCounter));
        }
        if(intr & EHSF_ASYNCADVANCE)
        {
            KPRINTF(1, ("AsyncAdvance\n"));
            hc->hc_AsyncAdvanced = TRUE;
        }
        if(intr & EHSF_HOSTERROR)
        {
            KPRINTF(200, ("Host ERROR!\n"));
        }
        if(intr & EHSF_PORTCHANGED)
        {
            UWORD hciport;
            ULONG oldval;
            UWORD portreg = EHCI_PORTSC1;
            for(hciport = 0; hciport < hc->hc_NumPorts; hciport++, portreg += 4)
            {
                oldval = READREG32_LE(hc->hc_RegBase, portreg);
                // reflect port ownership (shortcut without hc->hc_PortNum20[hciport], as usb 2.0 maps 1:1)
                unit->hu_EhciOwned[hciport] = (oldval & EHPF_NOTPORTOWNER) ? FALSE : TRUE;
                if(oldval & EHPF_ENABLECHANGE)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_ENABLE;
                }
                if(oldval & EHPF_CONNECTCHANGE)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_CONNECTION;
                }
                if(oldval & EHPF_RESUMEDTX)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_SUSPEND|UPSF_PORT_ENABLE;
                }
                if(oldval & EHPF_OVERCURRENTCHG)
                {
                    hc->hc_PortChangeMap[hciport] |= UPSF_PORT_OVER_CURRENT;
                }
                WRITEREG32_LE(hc->hc_RegBase, portreg, oldval);
                KPRINTF(20, ("PCI Int Port %ld Change %08lx\n", hciport + 1, oldval));
                if(hc->hc_PortChangeMap[hciport])
                {
                    unit->hu_RootPortChanges |= 1UL<<(hciport + 1);
                }
            }
            uhwCheckRootHubChanges(unit);
        }
        if(intr & (EHSF_TDDONE|EHSF_TDERROR|EHSF_ASYNCADVANCE))
        {
            SureCause(base, &hc->hc_CompleteInt);
        }
    }

    return FALSE;
}
BOOL ehciInit(struct PCIController *hc, struct PCIUnit *hu) {

    struct PCIDevice *hd = hu->hu_Device;
    struct EhciQH *predeqh;
    volatile APTR pciregbase;

    struct TagItem pciActivateMem[] =
    {
        { aHidd_PCIDevice_isMEM, TRUE },
        { TAG_DONE, 0UL }
    };

    struct TagItem pciActivateBusmaster[] =
    {
        { aHidd_PCIDevice_isMaster, TRUE },
        { TAG_DONE, 0UL }
    };

    struct TagItem pciDeactivateBusmaster[] =
    {
        { aHidd_PCIDevice_isMaster, FALSE },
        { TAG_DONE, 0UL }
    };

    hc->hc_portroute = 0;

    hc->hc_CompleteInt.is_Node.ln_Type = NT_INTERRUPT;
    hc->hc_CompleteInt.is_Node.ln_Name = "EHCI CompleteInt";
    hc->hc_CompleteInt.is_Node.ln_Pri = 0;
    hc->hc_CompleteInt.is_Data = hc;
    hc->hc_CompleteInt.is_Code = (VOID_FUNC)ehciCompleteInt;

    /*
        FIXME: Check the real size from USBCMD Frame List Size field (bits3:2)
        and set the value accordingly if Frame List Flag in the HCCPARAMS indicates RW for the field
        else use default value of EHCI_FRAMELIST_SIZE (1024)
    */
    hc->hc_PCIMemSize = sizeof(ULONG) * EHCI_FRAMELIST_SIZE + EHCI_FRAMELIST_ALIGNMENT + 1;
    hc->hc_PCIMemSize += sizeof(struct EhciQH) * EHCI_QH_POOLSIZE;
    hc->hc_PCIMemSize += sizeof(struct EhciTD) * EHCI_TD_POOLSIZE;

    /*
        FIXME: We should be able to read some EHCI registers before allocating memory
    */
    memptr = HIDD_PCIDriver_AllocPCIMem(hc->hc_PCIDriverObject, hc->hc_PCIMemSize);
    hc->hc_PCIMem = (APTR) memptr;
    if(memptr)
    {
        // PhysicalAddress - VirtualAdjust = VirtualAddress
        // VirtualAddress + VirtualAdjust = PhysicalAddress
        hc->hc_PCIVirtualAdjust = pciGetPhysical(hc, memptr) - (APTR)memptr;
        KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc->hc_PCIVirtualAdjust));

        memptr = (UBYTE *) ((((IPTR) hc->hc_PCIMem) + EHCI_FRAMELIST_ALIGNMENT) & (~EHCI_FRAMELIST_ALIGNMENT));
        hc->hc_EhciFrameList = (ULONG *) memptr;
        KPRINTF(10, ("FrameListBase 0x%p\n", hc->hc_EhciFrameList));
        memptr += sizeof(APTR) * EHCI_FRAMELIST_SIZE;
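
        /*
         * All hardware-visible structures live in one PCI memory chunk: the
         * (aligned) periodic frame list, a pool of EHCI_QH_POOLSIZE queue
         * heads and a pool of EHCI_TD_POOLSIZE transfer descriptors.
         * hc_PCIVirtualAdjust records the offset between the CPU-visible and
         * the bus-visible address of that chunk, so eqh_Self/etd_Self can be
         * precomputed as physical pointers for the controller.
         */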
        eqh = (struct EhciQH *) memptr;
        hc->hc_EhciQHPool = eqh;
        cnt = EHCI_QH_POOLSIZE - 1;
        do
        {
            // minimal initialization
            eqh->eqh_Succ = (eqh + 1);
            WRITEMEM32_LE(&eqh->eqh_Self, (IPTR) (&eqh->eqh_NextQH) + hc->hc_PCIVirtualAdjust + EHCI_QUEUEHEAD);
            CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
            CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
            eqh++;
        } while(--cnt);
        eqh->eqh_Succ = NULL;
        WRITEMEM32_LE(&eqh->eqh_Self, (IPTR) (&eqh->eqh_NextQH) + hc->hc_PCIVirtualAdjust + EHCI_QUEUEHEAD);
        CONSTWRITEMEM32_LE(&eqh->eqh_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&eqh->eqh_AltNextTD, EHCI_TERMINATE);
        memptr += sizeof(struct EhciQH) * EHCI_QH_POOLSIZE;

        etd = (struct EhciTD *) memptr;
        hc->hc_EhciTDPool = etd;
        cnt = EHCI_TD_POOLSIZE - 1;
        do
        {
            etd->etd_Succ = (etd + 1);
            WRITEMEM32_LE(&etd->etd_Self, (IPTR) (&etd->etd_NextTD) + hc->hc_PCIVirtualAdjust);
            etd++;
        } while(--cnt);
        etd->etd_Succ = NULL;
        WRITEMEM32_LE(&etd->etd_Self, (IPTR) (&etd->etd_NextTD) + hc->hc_PCIVirtualAdjust);
        memptr += sizeof(struct EhciTD) * EHCI_TD_POOLSIZE;

        // empty async queue head
        hc->hc_EhciAsyncFreeQH = NULL;
        hc->hc_EhciAsyncQH = eqh = ehciAllocQH(hc);
        eqh->eqh_Succ = eqh;
        eqh->eqh_Pred = eqh;
        CONSTWRITEMEM32_LE(&eqh->eqh_EPCaps, EQEF_RECLAMHEAD);
        eqh->eqh_NextQH = eqh->eqh_Self;

        // empty terminating queue head
        hc->hc_EhciTermQH = eqh = ehciAllocQH(hc);
        eqh->eqh_Succ = NULL;
        CONSTWRITEMEM32_LE(&eqh->eqh_NextQH, EHCI_TERMINATE);
        predeqh = eqh;

        hc->hc_EhciIntQH[0] = eqh = ehciAllocQH(hc);
        eqh->eqh_Succ = predeqh;
        predeqh->eqh_Pred = eqh;
        eqh->eqh_Pred = NULL; // who knows...
        //eqh->eqh_NextQH = predeqh->eqh_Self;
        predeqh = eqh;

        // make 11 levels of QH interrupts
        for(cnt = 1; cnt < 11; cnt++)
        {
            hc->hc_EhciIntQH[cnt] = eqh = ehciAllocQH(hc);
            eqh->eqh_Succ = predeqh;
            eqh->eqh_Pred = NULL; // who knows...
            //eqh->eqh_NextQH = predeqh->eqh_Self; // link to previous int level
            predeqh = eqh;
        }

        ehciUpdateIntTree(hc);

        // fill in framelist with IntQH entry points based on interval
        tabptr = hc->hc_EhciFrameList;
        for(cnt = 0; cnt < EHCI_FRAMELIST_SIZE; cnt++)
        {
            eqh = hc->hc_EhciIntQH[10];
            bitcnt = 0;
            do
            {
                if(cnt & (1UL<<bitcnt))
                {
                    eqh = hc->hc_EhciIntQH[bitcnt];
                    break;
                }
            } while(++bitcnt < 11);
            *tabptr++ = eqh->eqh_Self;
        }

        etd = hc->hc_ShortPktEndTD = ehciAllocTD(hc);
        etd->etd_Succ = NULL;
        CONSTWRITEMEM32_LE(&etd->etd_NextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&etd->etd_AltNextTD, EHCI_TERMINATE);
        CONSTWRITEMEM32_LE(&etd->etd_CtrlStatus, 0);

        // time to initialize hardware...
        OOP_GetAttr(hc->hc_PCIDeviceObject, aHidd_PCIDevice_Base0, (IPTR *) &pciregbase);
        pciregbase = (APTR) (((IPTR) pciregbase) & (~0xf));
        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateMem); // activate memory

        extcapoffset = (READREG32_LE(pciregbase, EHCI_HCCPARAMS) & EHCM_EXTCAPOFFSET)>>EHCS_EXTCAPOFFSET;
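
        /*
         * EHCI "Extended Capabilities": HCCPARAMS points into PCI config
         * space at a list of capability structures. Capability ID 0x01 is
         * USBLEGSUP, used for the BIOS/OS ownership handshake below: the OS
         * sets its ownership bit and checks whether the BIOS releases the
         * controller (forcing the issue if it does not), then masks all
         * legacy SMI sources before taking over.
         */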
        while(extcapoffset >= 0x40)
        {
            KPRINTF(10, ("EHCI has extended caps at 0x%08lx\n", extcapoffset));
            legsup = PCIXReadConfigLong(hc, extcapoffset);
            if(((legsup & EHLM_CAP_ID) >> EHLS_CAP_ID) == 0x01)
            {
                if(legsup & EHLF_BIOS_OWNER)
                {
                    KPRINTF(10, ("BIOS still has hands on EHCI, trying to get rid of it\n"));
                    legsup |= EHLF_OS_OWNER;
                    PCIXWriteConfigLong(hc, extcapoffset, legsup);

                    legsup = PCIXReadConfigLong(hc, extcapoffset);
                    if(!(legsup & EHLF_BIOS_OWNER))
                    {
                        KPRINTF(10, ("BIOS gave up on EHCI. Pwned!\n"));
                    } else {
                        KPRINTF(10, ("BIOS didn't release EHCI. Forcing and praying...\n"));
                        legsup |= EHLF_OS_OWNER;
                        legsup &= ~EHLF_BIOS_OWNER;
                        PCIXWriteConfigLong(hc, extcapoffset, legsup);
                    }
                }
                /* disable all SMIs */
                PCIXWriteConfigLong(hc, extcapoffset + 4, 0);
            }
            extcapoffset = (legsup & EHCM_EXTCAPOFFSET)>>EHCS_EXTCAPOFFSET;
        }

        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciDeactivateBusmaster); // no busmaster yet

        // we use the operational registers as RegBase.
        hc->hc_RegBase = (APTR) ((IPTR) pciregbase + READREG16_LE(pciregbase, EHCI_CAPLENGTH));
        KPRINTF(10, ("RegBase = 0x%p\n", hc->hc_RegBase));
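
        /*
         * Controller bring-up follows the EHCI spec: clear Run/Stop and wait
         * for HCHalted in USBSTS, then set HCRESET in USBCMD and wait for the
         * bit to self-clear before programming the schedules and enabling the
         * controller.
         */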
        KPRINTF(10, ("Resetting EHCI HC\n"));
        KPRINTF(10, ("EHCI CMD: 0x%08x STS: 0x%08x\n", READREG32_LE(hc->hc_RegBase, EHCI_USBCMD), READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS)));
        /* Step 1: Stop the HC */
        tmp = READREG32_LE(hc->hc_RegBase, EHCI_USBCMD);
        tmp &= ~EHUF_RUNSTOP;
        CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, tmp);

        /* Step 2. Wait for the controller to halt */
        cnt = 100;
        do
        {
            if(READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS) & EHSF_HCHALTED)
            {
                break;
            }
        } while(--cnt);
        if(!cnt)
        {
            KPRINTF(200, ("EHCI: Timeout waiting for controller to halt\n"));
        }

        /* Step 3. Reset the controller */
        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, tmp | EHUF_HCRESET);

        /* Step 4. Wait for the reset bit to clear */
        cnt = 100;
        do
        {
            if(!(READREG32_LE(hc->hc_RegBase, EHCI_USBCMD) & EHUF_HCRESET))
            {
                break;
            }
        } while(--cnt);
        if(!cnt)
        {
            KPRINTF(20, ("Reset Timeout!\n"));
        } else {
            KPRINTF(20, ("Reset finished after %ld ticks\n", 100-cnt));
        }

        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *) pciActivateBusmaster); // enable busmaster

        // Read HCSPARAMS register to obtain number of downstream ports
        hcsparams = READREG32_LE(pciregbase, EHCI_HCSPARAMS);
        hccparams = READREG32_LE(pciregbase, EHCI_HCCPARAMS);

        hc->hc_NumPorts = (hcsparams & EHSM_NUM_PORTS)>>EHSS_NUM_PORTS;

        KPRINTF(20, ("Found EHCI Controller 0x%p with %ld ports (%ld companions with %ld ports each)\n",
                     hc->hc_PCIDeviceObject, hc->hc_NumPorts,
                     (hcsparams & EHSM_NUM_COMPANIONS)>>EHSS_NUM_COMPANIONS,
                     (hcsparams & EHSM_PORTS_PER_COMP)>>EHSS_PORTS_PER_COMP));

        if(hcsparams & EHSF_EXTPORTROUTING)
        {
            hc->hc_complexrouting = TRUE;
            hc->hc_portroute = READREG32_LE(pciregbase, EHCI_HCSPPORTROUTE);
            for(cnt = 0; cnt < hc->hc_NumPorts; cnt++) {
                KPRINTF(100, ("Port %ld maps to controller %ld\n", cnt, ((hc->hc_portroute >> (cnt<<2)) & 0xf)));
            }
        } else {
            hc->hc_complexrouting = FALSE;
        }

        KPRINTF(20, ("HCCParams: 64 Bit=%s, ProgFrameList=%s, AsyncSchedPark=%s\n",
                     (hccparams & EHCF_64BITS) ? "Yes" : "No",
                     (hccparams & EHCF_PROGFRAMELIST) ? "Yes" : "No",
                     (hccparams & EHCF_ASYNCSCHEDPARK) ? "Yes" : "No"));
        hc->hc_EhciUsbCmd = (1UL<<EHUS_INTTHRESHOLD);

        /* FIXME HERE: Process EHCF_64BITS flag and implement 64-bit addressing */

        if(hccparams & EHCF_ASYNCSCHEDPARK)
        {
            KPRINTF(20, ("Enabling AsyncSchedParkMode with MULTI_3\n"));
            hc->hc_EhciUsbCmd |= EHUF_ASYNCSCHEDPARK|(3<<EHUS_ASYNCPARKCOUNT);
        }

        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd);

        CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT, 0);

        WRITEREG32_LE(hc->hc_RegBase, EHCI_PERIODICLIST, (IPTR)pciGetPhysical(hc, hc->hc_EhciFrameList));
        WRITEREG32_LE(hc->hc_RegBase, EHCI_ASYNCADDR, AROS_LONG2LE(hc->hc_EhciAsyncQH->eqh_Self));
        CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS, EHSF_ALL_INTS);

        // install reset handler
        hc->hc_ResetInt.is_Code = (VOID_FUNC)EhciResetHandler;
        hc->hc_ResetInt.is_Data = hc;
        AddResetCallback(&hc->hc_ResetInt);

        hc->hc_PCIIntHandler.is_Node.ln_Name = "EHCI PCI (pciusb.device)";
        hc->hc_PCIIntHandler.is_Node.ln_Pri = 5;
        hc->hc_PCIIntHandler.is_Node.ln_Type = NT_INTERRUPT;
        hc->hc_PCIIntHandler.is_Code = (VOID_FUNC)ehciIntCode;
        hc->hc_PCIIntHandler.is_Data = hc;
        PCIXAddInterrupt(hc, &hc->hc_PCIIntHandler);

        hc->hc_PCIIntEnMask = EHSF_ALL_INTS;
        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBINTEN, hc->hc_PCIIntEnMask);

        CacheClearE(hc->hc_EhciFrameList, sizeof(ULONG) * EHCI_FRAMELIST_SIZE, CACRF_ClearD);
        CacheClearE(hc->hc_EhciQHPool, sizeof(struct EhciQH) * EHCI_QH_POOLSIZE, CACRF_ClearD);
        CacheClearE(hc->hc_EhciTDPool, sizeof(struct EhciTD) * EHCI_TD_POOLSIZE, CACRF_ClearD);

        CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_CONFIGFLAG, EHCF_CONFIGURED);
        hc->hc_EhciUsbCmd |= EHUF_RUNSTOP|EHUF_PERIODICENABLE|EHUF_ASYNCENABLE;
        WRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, hc->hc_EhciUsbCmd);

        KPRINTF(20, ("HW Init done\n"));

        KPRINTF(10, ("HW Regs USBCMD=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_USBCMD)));
        KPRINTF(10, ("HW Regs USBSTS=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_USBSTATUS)));
        KPRINTF(10, ("HW Regs FRAMECOUNT=%04lx\n", READREG32_LE(hc->hc_RegBase, EHCI_FRAMECOUNT)));

        KPRINTF(1000, ("ehciInit returns TRUE...\n"));
        return TRUE;
    }

    /*
        FIXME: What would the appropriate debug level be?
    */
    KPRINTF(1000, ("ehciInit returns FALSE...\n"));
    return FALSE;
}
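
/*
 * ehciFree() walks all controllers of the unit and, for each EHCI instance,
 * masks its interrupts, disables every root hub port, stops the controller,
 * hands back CONFIGFLAG and finally issues a host controller reset so the
 * hardware is quiescent before the driver goes away.
 */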
void ehciFree(struct PCIController *hc, struct PCIUnit *hu) {

    UWORD hciport;
    UWORD portreg;

    hc = (struct PCIController *) hu->hu_Controllers.lh_Head;
    while(hc->hc_Node.ln_Succ)
    {
        switch(hc->hc_HCIType)
        {
            case HCITYPE_EHCI:
            {
                KPRINTF(20, ("Shutting down EHCI 0x%p\n", hc));
                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBINTEN, 0);
                // disable all ports
                for(hciport = 0; hciport < hc->hc_NumPorts; hciport++)
                {
                    portreg = EHCI_PORTSC1 + (hciport<<2);
                    WRITEREG32_LE(hc->hc_RegBase, portreg, 0);
                }
                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, 1UL<<EHUS_INTTHRESHOLD);

                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_CONFIGFLAG, 0);
                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, EHUF_HCRESET|(1UL<<EHUS_INTTHRESHOLD));

                CONSTWRITEREG32_LE(hc->hc_RegBase, EHCI_USBCMD, 1UL<<EHUS_INTTHRESHOLD);

                KPRINTF(20, ("Shutting down EHCI done.\n"));
                break;
            }
        }

        hc = (struct PCIController *) hc->hc_Node.ln_Succ;
    }
}