/*
    Copyright © 2002-2009, Chris Hodges. All rights reserved.
    Copyright © 2009-2012, The AROS Development Team. All rights reserved.
*/
7 #include <devices/usb_hub.h>
10 #include <proto/exec.h>
11 #include <clib/alib_protos.h>
12 #include <proto/oop.h>
18 #include "chip_protos.h"
19 #include "roothub_protos.h"
20 #include "buffer_protos.h"
21 #include "cmd_protos.h"
22 #include "pci_protos.h"
24 #undef HiddPCIDeviceAttrBase
25 #define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
27 #define HiddAttrBase (hd->hd_HiddAB)
29 ULONG start_masks
[] = {OCSF_CTRLENABLE
, OCSF_BULKENABLE
, 0UL, 0UL};
30 ULONG current_ed_regs
[] = {OHCI_CTRL_ED
, OHCI_BULK_ED
, 0UL, 0UL};
32 static ULONG
ScheduleED(struct PCIController
*hc
, UWORD xfer_type
,
33 struct IOUsbHWReq
*ioreq
);
34 static ULONG
FillED(struct PCIController
*hc
, struct EDNode
*ed
,
35 UWORD xfer_type
, struct IOUsbHWReq
*ioreq
, UWORD dir
);
37 /* /// "AddHeadPhy()" */
38 static void AddHeadED(ULONG
* list
, struct EDNode
*ed
)
40 ed
->ed_ED
.NextED
= *list
;
42 CacheClearE(&ed
->ed_ED
.EPCaps
, 16, CACRF_ClearD
);
43 CacheClearE(list
, 4, CACRF_ClearD
);
48 static struct EDNode
*AllocED(struct PCIController
*hc
)
51 (struct EDNode
*)RemHead((struct List
*)&hc
->hc_FreeEDList
);
55 ed
->ed_ED
.HeadPtr
= 0UL;
56 ed
->ed_ED
.TailPtr
= hc
->hc_TermTD
->td_Self
;
59 KPRINTF(20, ("Out of EDs!\n"));
/* FreeED: return an Endpoint Descriptor node to the controller's free
 * pool. Marks the hardware ED as skipped, drops the setup-buffer
 * reference and clears the queue pointers.
 * NOTE(review): this extract is line-shattered; original lines 67 and
 * 69-72 (likely further field resets) are missing from this view — do
 * not modify without the pristine source. */
66 static void FreeED(struct PCIController
*hc
, struct EDNode
*ed
)
/* tell the HC to skip this endpoint from now on */
68 CONSTWRITEMEM32_LE(&ed
->ed_ED
.EPCaps
, OECF_SKIP
);
/* node carries no request state while on the free list */
73 ed
->ed_SetupData
= NULL
;
74 AddTail((struct List
*)&hc
->hc_FreeEDList
, (struct Node
*)ed
);
/* clear both hardware queue pointers for the next user */
75 ed
->ed_ED
.HeadPtr
= ed
->ed_ED
.TailPtr
= 0UL;
80 static struct TDNode
*AllocTD(struct PCIController
*hc
)
83 (struct TDNode
*)RemHead((struct List
*)&hc
->hc_FreeTDList
);
86 KPRINTF(20, ("Out of TDs!\n"));
/* FreeTD: return a Transfer Descriptor node to the controller's free
 * pool.
 * NOTE(review): line-shattered extract; original lines 94 and 96-98 are
 * missing from this view. */
93 static void FreeTD(struct PCIController
*hc
, struct TDNode
*td
)
/* sever the hardware link before recycling the node */
95 td
->td_TD
.NextTD
= 0UL;
99 AddTail((struct List
*)&hc
->hc_FreeTDList
, (struct Node
*)td
);
103 /* /// "DisableED()" */
104 /* note: does not work on EDs in the interrupt tree */
105 static void DisableED(struct EDNode
*ed
)
107 ULONG ctrlstatus
, succ_ed_phy
, dma_size
;
108 struct EDNode
*pred_ed
, *succ_ed
;
111 ctrlstatus
= READMEM32_LE(&ed
->ed_ED
.EPCaps
);
112 ctrlstatus
|= OECF_SKIP
;
113 WRITEMEM32_LE(&ed
->ed_ED
.EPCaps
, ctrlstatus
);
115 // unlink from schedule
116 succ_ed
= (struct EDNode
*)ed
->ed_Node
.mln_Succ
;
117 pred_ed
= (struct EDNode
*)ed
->ed_Node
.mln_Pred
;
118 if (succ_ed
->ed_Node
.mln_Succ
!= NULL
)
119 succ_ed_phy
= succ_ed
->ed_Self
;
122 if (pred_ed
->ed_Node
.mln_Pred
!= NULL
)
123 pred_ed
->ed_ED
.NextED
= succ_ed_phy
;
125 Remove((struct Node
*)ed
);
127 dma_size
= sizeof(struct EndpointDescriptor
);
128 CachePreDMA(&ed
->ed_ED
, &dma_size
, 0);
133 /* /// "DisableInt()" */
134 static void DisableInt(struct PCIController
*hc
, ULONG mask
)
136 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTDIS
, mask
);
137 hc
->hc_PCIIntEnMask
&= ~mask
;
141 /* /// "EnableInt()" */
142 static void EnableInt(struct PCIController
*hc
, ULONG mask
)
144 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTSTATUS
, mask
);
145 hc
->hc_PCIIntEnMask
|= mask
;
146 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTEN
, mask
);
152 /* /// "PrintTD()" */
/* Debug helper: print the TD chain that starts at physical address ptd.
 * NOTE(review): line-shattered extract — the DEBUG preprocessor guard,
 * loop header and braces (original lines 154, 156-159, 162, 165-170) are
 * missing from this view. */
153 static void PrintTD(const char *txt
, ULONG ptd
, struct PCIController
*hc
)
155 KPrintF("HC 0x%p %s TD list:", hc
, txt
);
/* translate the bus address back into the TDNode's virtual address */
160 (struct TDNode
*)((IPTR
) ptd
- hc
->hc_PCIVirtualAdjust
-
161 offsetof(struct TDNode
, td_TD
.Ctrl
));
163 KPrintF(" 0x%p", td
);
/* follow the hardware NextTD link */
164 ptd
= READMEM32_LE(&td
->td_TD
.NextTD
);
/* non-debug builds compile the call away */
171 #define PrintTD(txt, ptd, hc)
176 /* /// "PrintED()" */
/* Debug helper: dump one ED's hardware words and walk its software TD
 * list, printing each TD's control/buffer/link fields.
 * NOTE(review): line-shattered extract — the DEBUG guard, KPrintF call
 * heads, braces and format-string tails (original lines 179-182, 184,
 * 188, 192, 194, 197-200) are missing from this view. */
177 static void PrintED(const char *txt
, struct EDNode
*ed
,
178 struct PCIController
*hc
)
183 ("%s ED 0x%p: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx,"
185 txt
, ed
, READMEM32_LE(&ed
->ed_ED
.EPCaps
),
186 READMEM32_LE(&ed
->ed_ED
.HeadPtr
), READMEM32_LE(&ed
->ed_ED
.TailPtr
),
187 READMEM32_LE(&ed
->ed_ED
.NextED
));
189 KPrintF("...TD list:\n");
/* iterate the doubly-linked software TD list until the tail sentinel */
190 for (td
= (struct TDNode
*)ed
->ed_TDList
.mlh_Head
; td
->td_Node
.mln_Succ
;
191 td
= (struct TDNode
*)td
->td_Node
.mln_Succ
)
193 ("TD 0x%p: td_TD.Ctrl=%lx BufferPtr=%lx NextTD=%lx"
195 td
, td
->td_TD
.Ctrl
, td
->td_TD
.BufferPtr
, td
->td_TD
.NextTD
,
196 td
->td_TD
.BufferEnd
);
/* non-debug builds compile the call away */
201 #define PrintED(txt, ed, hc)
204 /* /// "ResetHandler()" */
/* System reset callback: writes OCSF_HCRESET to the command/status
 * register so the host controller is reset before the machine reboots.
 * NOTE(review): the AROS_INTH macro scaffolding (original lines 206-209,
 * 211-216) is missing from this line-shattered extract. */
205 static AROS_INTH1(ResetHandler
, struct PCIController
*, hc
)
210 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
, OCSF_HCRESET
);
218 /* /// "AddTailTD()" */
219 static void AddTailTD(struct EDNode
*ed
, struct TDNode
*td
)
221 struct TDNode
*old_tail_td
= NULL
;
222 ULONG dma_size
, td_phy
;
224 if ((ed
->ed_ED
.HeadPtr
& OHCI_PTRMASK
) != 0UL)
225 old_tail_td
= (struct TDNode
*)ed
->ed_TDList
.mlh_TailPred
;
227 td
->td_TD
.NextTD
= ed
->ed_ED
.TailPtr
;
230 dma_size
= sizeof(struct TransferDescriptor
);
231 td_phy
= (ULONG
) (IPTR
) CachePreDMA(&td
->td_TD
, &dma_size
, 0);
233 if (old_tail_td
!= NULL
)
235 old_tail_td
->td_TD
.NextTD
= td_phy
;
236 dma_size
= sizeof(struct TransferDescriptor
);
237 CachePreDMA(&old_tail_td
->td_TD
, &dma_size
, 0);
241 ed
->ed_ED
.HeadPtr
|= td
->td_Self
;
242 dma_size
= sizeof(struct EndpointDescriptor
);
243 CachePreDMA(&ed
->ed_ED
, &dma_size
, 0);
248 /* /// "FreeTDChain()" */
249 static void FreeTDChain(struct PCIController
*hc
, struct MinList
*tdlist
)
253 while ((td
= (struct TDNode
*)RemHead((struct List
*)tdlist
)) != NULL
)
255 KPRINTF(1, ("FreeTD %p\n", td
));
261 /* /// "FreeEDContext()" */
/* Release everything attached to an ED once its request is finished:
 * the data buffer (direction depends on the request type), the 8-byte
 * setup packet buffer, and the TD chain.
 * NOTE(review): line-shattered extract — original lines 264-266, 268,
 * 270-271, 273, 275, 277, 279-280 and 282-286 (incl. the trailing
 * FreeED and any Disable/Enable bracketing) are missing from view. */
262 static void FreeEDContext(struct PCIController
*hc
, struct EDNode
*ed
,
263 struct IOUsbHWReq
*ioreq
)
267 KPRINTF(5, ("Freeing EDContext 0x%p IOReq 0x%p\n", ed
, ioreq
));
/* control transfers derive direction from the setup packet... */
269 if (ioreq
->iouh_Req
.io_Command
== UHCMD_CONTROLXFER
)
272 iouh_SetupData
.bmRequestType
& URTF_IN
) ? UHDIR_IN
: UHDIR_OUT
;
/* ...all other transfer types carry it in iouh_Dir */
274 dir
= ioreq
->iouh_Dir
;
276 usbReleaseBuffer(ed
->ed_Buffer
, ioreq
->iouh_Data
, ioreq
->iouh_Actual
,
/* setup packets always copy back as if read (UHDIR_IN) */
278 usbReleaseBuffer(ed
->ed_SetupData
, &ioreq
->iouh_SetupData
, 8, UHDIR_IN
);
281 FreeTDChain(hc
, &ed
->ed_TDList
);
287 /* /// "UpdateIntTree()" */
288 static void UpdateIntTree(struct PCIController
*hc
)
292 ULONG
*queue_heads
= hc
->hc_HCCA
->ha_IntEDs
;
294 // initialise every queue head to point at the terminal ED by default
295 for (i
= 0; i
< 32; i
++)
297 queue_heads
[i
] = hc
->hc_TermED
->ed_Self
;
300 // put each ED in the right number of queues for its interval level.
301 // we balance the tree by incrementing the slot we start at for each ED
302 for (i
= 0; i
< INT_LIST_COUNT
; i
++)
304 ed
= (struct EDNode
*)hc
->hc_EDLists
[INT_XFER
+ i
].mlh_Head
;
305 for (j
= 0; ed
->ed_Node
.mln_Succ
!= NULL
; j
++)
307 for (k
= 0, l
= j
; k
< 1 << (INT_LIST_COUNT
- i
- 1); k
++)
309 AddHeadED(&queue_heads
[l
% 32], ed
);
312 ed
= (struct EDNode
*)ed
->ed_Node
.mln_Succ
;
318 /* /// "HandleFinishedTDs()" */
319 static void HandleFinishedTDs(struct PCIController
*hc
)
321 struct IOUsbHWReq
*ioreq
;
322 struct IOUsbHWReq
*nextioreq
;
323 struct EDNode
*ed
= NULL
;
324 struct TDNode
*td
, *nexttd
;
330 BOOL updatetree
= FALSE
;
334 struct PCIUnit
*unit
= hc
->hc_Unit
;
337 KPRINTF(100, ("Checking for work done...\n"));
339 donehead
= hc
->hc_DoneQueue
;
340 hc
->hc_DoneQueue
= 0UL;
344 KPRINTF(1, ("Nothing to do!\n"));
347 td
= (struct TDNode
*)((IPTR
) donehead
- hc
->hc_PCIVirtualAdjust
-
348 offsetof(struct TDNode
, td_TD
.Ctrl
));
349 KPRINTF(100, ("DoneHead=%08lx, TD=%p, Frame=%ld\n", donehead
, td
,
350 READREG32_LE(hc
->hc_RegBase
, OHCI_FRAMECOUNT
)));
351 PrintTD("Done", donehead
, hc
);
355 dma_size
= sizeof(struct TransferDescriptor
);
356 CachePostDMA(&td
->td_TD
, &dma_size
, 0);
361 ("Came across a rogue TD 0x%p that already has been freed!\n",
363 ptr
= READMEM32_LE(&td
->td_TD
.NextTD
) & OHCI_PTRMASK
;
368 td
= (struct TDNode
*)((IPTR
) ptr
- hc
->hc_PCIVirtualAdjust
-
369 offsetof(struct TDNode
, td_TD
));
372 dma_size
= sizeof(struct EndpointDescriptor
);
373 CachePostDMA(&ed
->ed_ED
, &dma_size
, 0);
375 ctrlstatus
= READMEM32_LE(&td
->td_TD
.Ctrl
);
376 KPRINTF(100, ("TD: %08lx - %08lx\n",
377 READMEM32_LE(&td
->td_TD
.BufferPtr
),
378 READMEM32_LE(&td
->td_TD
.BufferEnd
)));
379 if (td
->td_TD
.BufferPtr
)
381 // FIXME: this will blow up if physical memory is ever going to
384 READMEM32_LE(&td
->td_TD
.BufferPtr
) -
385 (READMEM32_LE(&td
->td_TD
.BufferEnd
) + 1 - td
->td_Length
);
392 ioreq
= ed
->ed_IOReq
;
395 ("Examining TD %p for ED %p (IOReq=%p), Status %08lx, len=%ld\n",
396 td
, ed
, ioreq
, ctrlstatus
, len
));
399 /* You should never see this (very weird inconsistency), but who
402 ("Came across a rogue ED 0x%p that already has been replied! "
405 ptr
= READMEM32_LE(&td
->td_TD
.NextTD
) & OHCI_PTRMASK
;
410 td
= (struct TDNode
*)((IPTR
) ptr
- hc
->hc_PCIVirtualAdjust
-
411 offsetof(struct TDNode
, td_TD
.Ctrl
));
417 epcaps
= READMEM32_LE(&ed
->ed_ED
.EPCaps
);
418 direction_in
= ((epcaps
& OECM_DIRECTION
) == OECF_DIRECTION_TD
)
419 ? (ioreq
->iouh_SetupData
.bmRequestType
& URTF_IN
)
420 : (epcaps
& OECF_DIRECTION_IN
);
421 // FIXME: CachePostDMA() should be passed a virtual pointer
422 CachePostDMA((APTR
) (IPTR
) READMEM32_LE(&td
->td_TD
.BufferEnd
) -
423 len
+ 1, &len
, direction_in
? 0 : DMA_ReadFromRAM
);
426 ioreq
->iouh_Actual
+= len
;
428 switch ((ctrlstatus
& OTCM_COMPLETIONCODE
))
430 case OTCF_CC_CRCERROR
:
432 case OTCF_CC_PIDCORRUPT
:
433 case OTCF_CC_WRONGPID
:
434 ioreq
->iouh_Req
.io_Error
= UHIOERR_CRCERROR
;
437 ioreq
->iouh_Req
.io_Error
= UHIOERR_STALL
;
439 case OTCF_CC_TIMEOUT
:
440 ioreq
->iouh_Req
.io_Error
= UHIOERR_TIMEOUT
;
442 case OTCF_CC_OVERFLOW
:
443 ioreq
->iouh_Req
.io_Error
= UHIOERR_OVERFLOW
;
445 case OTCF_CC_SHORTPKT
:
446 if ((!ioreq
->iouh_Req
.io_Error
)
447 && (!(ioreq
->iouh_Flags
& UHFF_ALLOWRUNTPKTS
)))
449 ioreq
->iouh_Req
.io_Error
= UHIOERR_RUNTPACKET
;
452 case OTCF_CC_OVERRUN
:
453 case OTCF_CC_UNDERRUN
:
454 ioreq
->iouh_Req
.io_Error
= UHIOERR_HOSTERROR
;
456 case OTCF_CC_NOERROR
:
457 case OTCF_CC_WRONGTOGGLE
:
458 case OTCF_CC_INVALID
:
464 KPRINTF(200, ("Bad completion code: %d\n",
465 (ctrlstatus
& OTCM_COMPLETIONCODE
) >>
466 OTCS_COMPLETIONCODE
));
467 if ((ctrlstatus
& OTCM_DELAYINT
) != OTCF_NOINT
)
469 KPRINTF(10, ("TD 0x%p Terminator detected\n", td
));
472 if (READMEM32_LE(&ed
->ed_ED
.HeadPtr
) & OEHF_HALTED
)
474 KPRINTF(100, ("ED halted!\n"));
480 KPRINTF(50, ("ED 0x%p stopped at TD 0x%p\n", ed
, td
));
481 Remove(&ioreq
->iouh_Req
.io_Message
.mn_Node
);
482 AddHead(&hc
->hc_RetireQueue
,
483 &ioreq
->iouh_Req
.io_Message
.mn_Node
);
486 ptr
= READMEM32_LE(&td
->td_TD
.NextTD
) & OHCI_PTRMASK
;
487 KPRINTF(1, ("NextTD=0x%08lx\n", ptr
));
492 td
= (struct TDNode
*)((IPTR
) ptr
- hc
->hc_PCIVirtualAdjust
-
493 offsetof(struct TDNode
, td_TD
.Ctrl
));
494 KPRINTF(1, ("NextTD = %p\n", td
));
498 ioreq
= (struct IOUsbHWReq
*)hc
->hc_RetireQueue
.lh_Head
;
500 (struct IOUsbHWReq
*)((struct Node
*)ioreq
)->ln_Succ
))
502 Remove(&ioreq
->iouh_Req
.io_Message
.mn_Node
);
503 ed
= (struct EDNode
*)ioreq
->iouh_DriverPrivate1
;
507 ("HC 0x%p Retiring IOReq=0x%p Command=%ld ED=0x%p, Frame=%ld\n",
508 hc
, ioreq
, ioreq
->iouh_Req
.io_Command
, ed
,
509 READREG32_LE(hc
->hc_RegBase
, OHCI_FRAMECOUNT
)));
513 // reinitialise physical links in ED and its TD list
514 td
= (struct TDNode
*)ed
->ed_TDList
.mlh_Head
;
515 ed
->ed_ED
.HeadPtr
= td
->td_Self
;
516 while (td
->td_Node
.mln_Succ
!= NULL
)
518 nexttd
= (struct TDNode
*)td
->td_Node
.mln_Succ
;
519 if (nexttd
!= (struct TDNode
*)&ed
->ed_TDList
.mlh_Tail
)
520 td
->td_TD
.NextTD
= nexttd
->td_Self
;
522 td
->td_TD
.NextTD
= hc
->hc_TermTD
->td_Self
;
526 // Refill ED with next data block
527 FillED(hc
, ed
, BULK_XFER
, ioreq
, ioreq
->iouh_Dir
);
528 PrintED("Continued bulk", ed
, hc
);
531 AddTail(&hc
->hc_TDQueue
, (struct Node
*)ioreq
);
532 oldenables
= READREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
);
533 oldenables
|= OCSF_BULKENABLE
;
534 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
, oldenables
);
541 if (ioreq
->iouh_Req
.io_Command
== UHCMD_INTXFER
)
544 Remove((struct Node
*)ed
);
548 PrintED("Completed", ed
, hc
);
551 (ioreq
->iouh_DevAddr
<< 5) + ioreq
->iouh_Endpoint
+
552 ((ioreq
->iouh_Dir
== UHDIR_IN
) ? 0x10 : 0);
553 unit
->hu_DevBusyReq
[target
] = NULL
;
554 unit
->hu_DevDataToggle
[target
] =
556 ed_ED
.HeadPtr
) & OEHF_DATA1
) ? TRUE
: FALSE
;
557 FreeEDContext(hc
, ed
, ioreq
);
559 // check for successful clear feature and set address ctrl
561 if ((!ioreq
->iouh_Req
.io_Error
)
562 && (ioreq
->iouh_Req
.io_Command
== UHCMD_CONTROLXFER
))
564 CheckSpecialCtrlTransfers(hc
, ioreq
);
566 ReplyMsg(&ioreq
->iouh_Req
.io_Message
);
571 KPRINTF(20, ("IOReq=%p has no ED!\n", ioreq
));
582 /* /// "HandleAbortedEDs()" */
583 static ULONG
HandleAbortedEDs(struct PCIController
*hc
)
585 struct IOUsbHWReq
*ioreq
;
586 ULONG restartmask
= 0;
589 struct PCIUnit
*unit
= hc
->hc_Unit
;
591 KPRINTF(50, ("Processing abort queue...\n"));
593 // We don't need this any more
594 DisableInt(hc
, OISF_SOF
);
597 * If the aborted IORequest was replied in HandleFinishedTDs(),
598 * it was already Remove()d from this queue. It's safe to do no checks.
599 * io_Error was set earlier.
601 while ((ioreq
= (struct IOUsbHWReq
*)RemHead(&hc
->hc_AbortQueue
)))
603 KPRINTF(70, ("HC 0x%p Aborted IOReq 0x%p\n", hc
, ioreq
));
604 PrintED("Aborted", ioreq
->iouh_DriverPrivate1
, hc
);
606 ed
= ioreq
->iouh_DriverPrivate1
;
608 (ioreq
->iouh_DevAddr
<< 5) + ioreq
->iouh_Endpoint
+
609 ((ioreq
->iouh_Dir
== UHDIR_IN
) ? 0x10 : 0);
610 unit
->hu_DevBusyReq
[target
] = NULL
;
611 unit
->hu_DevDataToggle
[target
] =
612 (READMEM32_LE(&ed
->ed_ED
.HeadPtr
) & OEHF_DATA1
) ? TRUE
: FALSE
;
613 FreeEDContext(hc
, ed
, ioreq
);
614 ReplyMsg(&ioreq
->iouh_Req
.io_Message
);
617 /* Restart stopped queues */
618 if (hc
->hc_Flags
& HCF_STOP_CTRL
)
620 KPRINTF(50, ("Restarting control transfers\n"));
621 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CTRL_ED
, 0);
622 restartmask
|= OCSF_CTRLENABLE
;
625 if (hc
->hc_Flags
& HCF_STOP_BULK
)
627 KPRINTF(50, ("Restarting bulk transfers\n"));
628 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_BULK_ED
, 0);
629 restartmask
|= OCSF_BULKENABLE
;
632 /* Everything is enabled again, aborting done */
633 hc
->hc_Flags
&= ~(HCF_STOP_CTRL
| HCF_STOP_BULK
| HCF_ABORT
);
635 /* We will accumulate flags and start queues only once, when everything
642 static ULONG
FillED(struct PCIController
*hc
, struct EDNode
*ed
,
643 UWORD xfer_type
, struct IOUsbHWReq
*ioreq
, UWORD dir
)
645 BOOL success
= TRUE
, is_new_td
;
653 if (xfer_type
== CTRL_XFER
)
655 // construct set-up TD
660 td
->td_Length
= 0; // don't increase io_Actual for that transfer
661 CONSTWRITEMEM32_LE(&td
->td_TD
.Ctrl
,
662 OTCF_CC_INVALID
| OTCF_TOGGLEFROMTD
| OTCF_NOINT
|
663 OTCF_PIDCODE_SETUP
| OTCF_ALLOWSHORTPKT
);
667 usbGetBuffer(&ioreq
->iouh_SetupData
, len
, UHDIR_OUT
);
669 (ULONG
) CachePreDMA(ed
->ed_SetupData
, &len
,
671 WRITEMEM32_LE(&td
->td_TD
.BufferPtr
, phyaddr
);
672 WRITEMEM32_LE(&td
->td_TD
.BufferEnd
, phyaddr
+ len
- 1);
674 KPRINTF(1, ("TD send: %08lx - %08lx\n",
675 READMEM32_LE(&td
->td_TD
.BufferPtr
),
676 READMEM32_LE(&td
->td_TD
.BufferEnd
)));
679 AddTail((struct List
*)&ed
->ed_TDList
, (struct Node
*)td
);
687 // put data into a series of TDs
688 actual
= ioreq
->iouh_Actual
;
690 OTCF_CC_INVALID
| OTCF_NOINT
| (dir
==
691 UHDIR_IN
? OTCF_PIDCODE_IN
: OTCF_PIDCODE_OUT
);
692 if (xfer_type
== CTRL_XFER
)
693 ctrl
|= OTCF_TOGGLEFROMTD
| OTCF_DATA1
;
694 if (xfer_type
== CTRL_XFER
695 || !(ioreq
->iouh_Flags
& UHFF_NOSHORTPKT
))
696 ctrl
|= OTCF_ALLOWSHORTPKT
;
699 usbGetBuffer(ioreq
->iouh_Data
, ioreq
->iouh_Length
, dir
);
700 if (ed
->ed_Buffer
== NULL
&& ioreq
->iouh_Data
!= NULL
)
702 if (xfer_type
== BULK_XFER
)
703 td
= (struct TDNode
*)ed
->ed_TDList
.mlh_Head
;
705 td
= (struct TDNode
*)&ed
->ed_TDList
.mlh_Tail
;
707 while (success
&& actual
< ioreq
->iouh_Length
708 && (actual
- ioreq
->iouh_Actual
< OHCI_TD_BULK_LIMIT
709 || xfer_type
!= BULK_XFER
))
711 // reuse the next old TD or get a new one
712 if (td
== (struct TDNode
*)&ed
->ed_TDList
.mlh_Tail
)
724 len
= ioreq
->iouh_Length
- actual
;
725 if (len
> OHCI_PAGE_SIZE
)
727 len
= OHCI_PAGE_SIZE
;
730 KPRINTF(1, ("TD with %ld bytes. Status=%lx\n", len
, ctrl
));
731 WRITEMEM32_LE(&td
->td_TD
.Ctrl
, ctrl
);
733 (ULONG
) (IPTR
) CachePreDMA(ed
->ed_Buffer
+ actual
, &len
,
734 dir
== UHDIR_IN
? 0 : DMA_ReadFromRAM
);
735 WRITEMEM32_LE(&td
->td_TD
.BufferPtr
, phyaddr
);
737 WRITEMEM32_LE(&td
->td_TD
.BufferEnd
, phyaddr
);
739 KPRINTF(1, ("TD send: %08lx - %08lx\n",
740 READMEM32_LE(&td
->td_TD
.BufferPtr
),
741 READMEM32_LE(&td
->td_TD
.BufferEnd
)));
748 AddTail((struct List
*)&ed
->ed_TDList
,
753 dma_size
= sizeof(struct TransferDescriptor
);
754 CachePreDMA(&td
->td_TD
, &dma_size
, 0);
757 td
= (struct TDNode
*)td
->td_Node
.mln_Succ
;
761 // construct control-status TD or empty-bulk TD
764 if (xfer_type
== CTRL_XFER
|| xfer_type
== BULK_XFER
765 && dir
== UHDIR_OUT
&& actual
== ioreq
->iouh_Length
766 && (!(ioreq
->iouh_Flags
& UHFF_NOSHORTPKT
))
767 && actual
% ioreq
->iouh_MaxPktSize
== 0)
769 if (td
== (struct TDNode
*)&ed
->ed_TDList
.mlh_Tail
)
781 if (xfer_type
== CTRL_XFER
)
784 OTCF_NOINT
| OTCF_PIDCODE_IN
| OTCF_PIDCODE_OUT
|
786 ctrl
|= OTCF_TOGGLEFROMTD
| OTCF_DATA1
;
792 CONSTWRITEMEM32_LE(&td
->td_TD
.Ctrl
, ctrl
);
793 CONSTWRITEMEM32_LE(&td
->td_TD
.BufferPtr
, 0);
794 CONSTWRITEMEM32_LE(&td
->td_TD
.BufferEnd
, 0);
799 AddTail((struct List
*)&ed
->ed_TDList
,
804 td
->td_TD
.NextTD
= hc
->hc_TermTD
->td_Self
;
805 dma_size
= sizeof(struct TransferDescriptor
);
806 CachePreDMA(&td
->td_TD
, &dma_size
, 0);
812 if (xfer_type
== BULK_XFER
)
813 ed
->ed_Continue
= (actual
< ioreq
->iouh_Length
);
814 td
= (struct TDNode
*)td
->td_Node
.mln_Pred
;
815 td
->td_TD
.NextTD
= hc
->hc_TermTD
->td_Self
;
816 CONSTWRITEMEM32_LE(&td
->td_TD
.Ctrl
, OTCF_CC_INVALID
);
817 dma_size
= sizeof(struct TransferDescriptor
);
818 CachePreDMA(&td
->td_TD
, &dma_size
, 0);
824 FreeEDContext(hc
, ed
, ioreq
);
831 /* /// "ScheduleED()" */
832 static ULONG
ScheduleED(struct PCIController
*hc
, UWORD xfer_type
,
833 struct IOUsbHWReq
*ioreq
)
836 struct PCIUnit
*unit
= hc
->hc_Unit
;
838 UWORD dir
, list_no
, list_index
, interval
;
840 struct EDNode
*pred_ed
;
841 ULONG epcaps
, dma_size
, phy_addr
;
849 ed
->ed_IOReq
= ioreq
;
851 if (xfer_type
== CTRL_XFER
)
854 iouh_SetupData
.bmRequestType
& URTF_IN
) ? UHDIR_IN
:
857 dir
= ioreq
->iouh_Dir
;
859 target
= (ioreq
->iouh_DevAddr
<< 5) + ioreq
->iouh_Endpoint
;
860 if (xfer_type
!= CTRL_XFER
&& dir
== UHDIR_IN
)
865 iouh_DevAddr
<< OECS_DEVADDR
) | (ioreq
->iouh_Endpoint
<<
866 OECS_ENDPOINT
) | (ioreq
->iouh_MaxPktSize
<< OECS_MAXPKTLEN
);
867 if (xfer_type
== CTRL_XFER
)
868 epcaps
|= OECF_DIRECTION_TD
;
871 dir
== UHDIR_IN
? OECF_DIRECTION_IN
: OECF_DIRECTION_OUT
;
873 if (ioreq
->iouh_Flags
& UHFF_LOWSPEED
)
875 KPRINTF(5, ("*** LOW SPEED ***\n"));
876 epcaps
|= OECF_LOWSPEED
;
879 WRITEMEM32_LE(&ed
->ed_ED
.EPCaps
, epcaps
);
881 if (xfer_type
!= CTRL_XFER
&& unit
->hu_DevDataToggle
[target
])
882 WRITEMEM32_LE(&ed
->ed_ED
.HeadPtr
, OEHF_DATA1
);
884 if (!FillED(hc
, ed
, xfer_type
, ioreq
, dir
))
890 Remove(&ioreq
->iouh_Req
.io_Message
.mn_Node
);
891 ioreq
->iouh_DriverPrivate1
= ed
;
893 // choose logical list to add ED to
895 if (xfer_type
== INT_XFER
)
897 interval
= ioreq
->iouh_Interval
;
907 list_index
= INT_LIST_COUNT
- 1;
909 list_no
= xfer_type
+ list_index
;
911 // manage endpoint going busy
913 unit
->hu_DevBusyReq
[target
] = ioreq
;
914 unit
->hu_NakTimeoutFrame
[target
] =
915 (ioreq
->iouh_Flags
& UHFF_NAKTIMEOUT
) ? hc
->hc_FrameCounter
+
916 ioreq
->iouh_NakTimeout
: 0;
918 AddTail(&hc
->hc_TDQueue
, (struct Node
*)ioreq
);
920 // looks good to me, now enqueue this entry
921 AddTail((struct List
*)&hc
->hc_EDLists
[list_no
], (struct Node
*)ed
);
923 if (xfer_type
== INT_XFER
)
929 ed
->ed_ED
.NextED
= 0L;
930 dma_size
= sizeof(struct EndpointDescriptor
);
931 phy_addr
= (ULONG
) (IPTR
) CachePreDMA(&ed
->ed_ED
, &dma_size
, 0);
933 pred_ed
= (struct EDNode
*)ed
->ed_Node
.mln_Pred
;
934 if (pred_ed
->ed_Node
.mln_Pred
!= NULL
)
936 pred_ed
->ed_ED
.NextED
= phy_addr
;
937 dma_size
= sizeof(struct EndpointDescriptor
);
938 CachePreDMA(&pred_ed
->ed_ED
, &dma_size
, 0);
941 WRITEREG32_LE(hc
->hc_RegBase
, (xfer_type
== CTRL_XFER
) ?
942 OHCI_CTRL_HEAD_ED
: OHCI_BULK_HEAD_ED
, ed
->ed_Self
);
947 PrintED(xfer_names
[xfer_type
], ed
, hc
);
954 FreeEDContext(hc
, ed
, ioreq
);
961 /* /// "ScheduleXfers()" */
962 static ULONG
ScheduleXfers(struct PCIController
*hc
, UWORD xfer_type
)
965 struct PCIUnit
*unit
= hc
->hc_Unit
;
966 struct IOUsbHWReq
*ioreq
;
972 KPRINTF(1, ("Scheduling new %s transfers...\n", xfer_names
[xfer_type
]));
973 ioreq
= (struct IOUsbHWReq
*)hc
->hc_XferQueues
[xfer_type
].lh_Head
;
974 while (success
&& ((struct Node
*)ioreq
)->ln_Succ
)
976 if (xfer_type
== CTRL_XFER
)
979 iouh_SetupData
.bmRequestType
& URTF_IN
) ? UHDIR_IN
:
982 dir
= ioreq
->iouh_Dir
;
984 target
= (ioreq
->iouh_DevAddr
<< 5) + ioreq
->iouh_Endpoint
;
985 if (xfer_type
!= CTRL_XFER
&& dir
== UHDIR_IN
)
987 KPRINTF(10, ("New %s transfer to %ld.%ld: %ld bytes\n",
988 xfer_names
[xfer_type
], ioreq
->iouh_DevAddr
,
989 ioreq
->iouh_Endpoint
, ioreq
->iouh_Length
));
990 /* is endpoint already in use or do we have to wait for next
992 if (unit
->hu_DevBusyReq
[target
])
994 KPRINTF(5, ("Endpoint %02lx in use!\n", target
));
995 ioreq
= (struct IOUsbHWReq
*)((struct Node
*)ioreq
)->ln_Succ
;
999 success
= ScheduleED(hc
, xfer_type
, ioreq
);
1001 ioreq
= (struct IOUsbHWReq
*)hc
->hc_XferQueues
[xfer_type
].lh_Head
;
1007 * If we are going to start the queue but it's not running yet,
1008 * reset current ED pointer to zero. This will cause the HC to
1009 * start over from the head.
1011 startmask
= start_masks
[xfer_type
];
1012 oldenables
= READREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
);
1013 if (!(oldenables
& startmask
))
1015 CONSTWRITEREG32_LE(hc
->hc_RegBase
, current_ed_regs
[xfer_type
],
1024 /* /// "UpdateFrameCounter()" */
/* Refresh the low 16 bits of the software frame counter from the
 * hardware OHCI_FRAMECOUNT register, keeping the extended high word.
 * NOTE(review): line-shattered extract — original lines 1026-1028 and
 * 1032-1035 (likely interrupt-disable bracketing and the closing brace)
 * are missing from this view. */
1025 void UpdateFrameCounter(struct PCIController
*hc
)
1029 hc
->hc_FrameCounter
=
1030 (hc
->hc_FrameCounter
& 0xffff0000) | (READREG32_LE(hc
->hc_RegBase
,
1031 OHCI_FRAMECOUNT
) & 0xffff);
1036 /* /// "CompleteInt()" */
1037 static AROS_INTH1(CompleteInt
, struct PCIController
*, hc
)
1041 ULONG restartmask
= 0;
1043 KPRINTF(1, ("CompleteInt!\n"));
1045 UpdateFrameCounter(hc
);
1047 /* **************** PROCESS DONE TRANSFERS **************** */
1049 WRITEREG32_LE(&hc
->hc_RegBase
, OHCI_INTDIS
, OISF_DONEHEAD
);
1050 if (hc
->hc_DoneQueue
)
1051 HandleFinishedTDs(hc
);
1053 if (hc
->hc_Flags
& HCF_ABORT
)
1054 restartmask
= HandleAbortedEDs(hc
);
1055 WRITEREG32_LE(&hc
->hc_RegBase
, OHCI_INTEN
, OISF_DONEHEAD
);
1057 if ((!(hc
->hc_Flags
& HCF_STOP_CTRL
))
1058 && hc
->hc_XferQueues
[CTRL_XFER
].lh_Head
->ln_Succ
)
1059 restartmask
|= ScheduleXfers(hc
, CTRL_XFER
);
1061 if (hc
->hc_XferQueues
[INT_XFER
].lh_Head
->ln_Succ
)
1062 ScheduleXfers(hc
, INT_XFER
);
1064 if ((!(hc
->hc_Flags
& HCF_STOP_BULK
))
1065 && hc
->hc_XferQueues
[BULK_XFER
].lh_Head
->ln_Succ
)
1066 restartmask
|= ScheduleXfers(hc
, BULK_XFER
);
1069 * Restart queues. In restartmask we have accumulated which queues need
1072 * We do it here only once, after everything is set up, because
1073 * otherwise HC goes nuts in some cases. For example, the following
1074 * situation caused TD queue loop: we are simultaneously scheduling two
1075 * control EDs and one of them completes with error. If we attempt to
1076 * start the queue right after an ED is scheduled (this is how the code
1077 * originally worked), it looks like the HC manages to deal with the
1078 * first ED right before the second one is scheduled. At this moment the
1079 * first TD is HALTed with HeadPtr pointing to the failed TD, which went
1080 * to the DoneQueue (which will be picked up only on next ISR round, we
1081 * are still in ScheduleCtrlEDs()). The second ED is scheduled (first
1082 * one is not removed yet!) and we re-trigger control queue to start.
1083 * It causes errorneous TD to reappear on the DoneQueue, effectively
1084 * looping it. DoneQueue loop causes HandleFinishedTDs() to never exit.
1085 * Restarting queues here in this manner actually fixed the problem.
1089 restartmask
|= READREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
);
1090 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
, restartmask
);
1094 KPRINTF(1, ("CompleteDone\n"));
1102 /* /// "IntCode()" */
1103 static AROS_INTH1(IntCode
, struct PCIController
*, hc
)
1107 struct PCIUnit
*unit
= hc
->hc_Unit
;
1112 dma_size
= sizeof(struct HCCA
);
1113 CachePostDMA(hc
->hc_HCCA
, &dma_size
, 0);
1115 donehead
= READMEM32_LE(&hc
->hc_HCCA
->ha_DoneHead
);
1120 intr
= OISF_DONEHEAD
;
1123 intr
|= READREG32_LE(hc
->hc_RegBase
, OHCI_INTSTATUS
);
1125 donehead
&= OHCI_PTRMASK
;
1127 CONSTWRITEMEM32_LE(&hc
->hc_HCCA
->ha_DoneHead
, 0);
1131 intr
= READREG32_LE(hc
->hc_RegBase
, OHCI_INTSTATUS
);
1133 if (intr
& OISF_DONEHEAD
)
1135 KPRINTF(1, ("DONEHEAD WAS EMPTY!\n"));
1137 READMEM32_LE(&hc
->hc_HCCA
->ha_DoneHead
) & OHCI_PTRMASK
;
1138 CONSTWRITEMEM32_LE(&hc
->hc_HCCA
->ha_DoneHead
, 0);
1140 KPRINTF(500, ("New Donehead %08lx for old %08lx\n", donehead
,
1144 dma_size
= sizeof(struct HCCA
);
1145 CachePreDMA(hc
->hc_HCCA
, &dma_size
, 0);
1147 intr
&= ~OISF_MASTERENABLE
;
1149 if (intr
& hc
->hc_PCIIntEnMask
)
1151 KPRINTF(1, ("IntCode(0x%p) interrupts 0x%08lx, mask 0x%08lx\n", hc
,
1152 intr
, hc
->hc_PCIIntEnMask
));
1154 if (intr
& OISF_HOSTERROR
)
1156 KPRINTF(200, ("Host ERROR!\n"));
1158 if (intr
& OISF_SCHEDOVERRUN
)
1160 KPRINTF(200, ("Schedule overrun!\n"));
1162 if (!(hc
->hc_Flags
& HCF_ONLINE
))
1164 if (READREG32_LE(hc
->hc_RegBase
,
1165 OHCI_INTSTATUS
) & OISF_HUBCHANGE
)
1167 // if the driver is not online and the controller has a broken
1168 // hub change interrupt, make sure we don't run into infinite
1169 // interrupt by disabling the interrupt bit
1170 DisableInt(hc
, OISF_HUBCHANGE
);
1174 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTEN
, OISF_HUBCHANGE
);
1175 if (intr
& OISF_FRAMECOUNTOVER
)
1177 hc
->hc_FrameCounter
|= 0x7fff;
1178 hc
->hc_FrameCounter
++;
1179 hc
->hc_FrameCounter
|=
1180 READMEM16_LE(&hc
->hc_HCCA
->ha_FrameCount
);
1181 KPRINTF(10, ("HCI 0x%p: Frame Counter Rollover %ld\n", hc
,
1182 hc
->hc_FrameCounter
));
1184 if (intr
& OISF_HUBCHANGE
)
1188 UWORD portreg
= OHCI_PORTSTATUS
;
1189 BOOL clearbits
= FALSE
;
1191 if (READREG32_LE(hc
->hc_RegBase
,
1192 OHCI_INTSTATUS
) & OISF_HUBCHANGE
)
1194 /* Some OHCI implementations will keep the interrupt bit
1195 * stuck until all port changes have been cleared, which is
1196 * wrong according to the OHCI spec. As a workaround we will
1197 * clear all change bits, which should be no problem as the
1198 * port changes are reflected in the PortChangeMap array.
1202 for (hciport
= 0; hciport
< hc
->hc_NumPorts
;
1203 hciport
++, portreg
+= 4)
1205 oldval
= READREG32_LE(hc
->hc_RegBase
, portreg
);
1206 hc
->hc_PortChangeMap
[hciport
] |= TranslatePortFlags(oldval
,
1207 OHPF_OVERCURRENTCHG
| OHPF_RESETCHANGE
|
1208 OHPF_ENABLECHANGE
| OHPF_CONNECTCHANGE
|
1212 WRITEREG32_LE(hc
->hc_RegBase
, portreg
,
1213 OHPF_CONNECTCHANGE
| OHPF_ENABLECHANGE
|
1214 OHPF_RESUMEDTX
| OHPF_OVERCURRENTCHG
|
1218 KPRINTF(20, ("PCI Int Port %ld (glob %ld) Change %08lx\n",
1219 hciport
, hc
->hc_PortNum20
[hciport
] + 1, oldval
));
1220 if (hc
->hc_PortChangeMap
[hciport
])
1222 unit
->hu_RootPortChanges
|=
1223 1UL << (hc
->hc_PortNum20
[hciport
] + 1);
1226 CheckRootHubChanges(unit
);
1229 // again try to get rid of any bits that may be causing the
1231 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_HUBSTATUS
,
1232 OHSF_OVERCURRENTCHG
);
1233 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTSTATUS
,
1237 if (intr
& OISF_DONEHEAD
)
1239 KPRINTF(10, ("DoneHead Frame=%ld\n",
1240 READREG32_LE(hc
->hc_RegBase
, OHCI_FRAMECOUNT
)));
1242 if (hc
->hc_DoneQueue
)
1244 struct TDNode
*donetd
=
1245 (struct TDNode
*)((IPTR
) donehead
-
1246 hc
->hc_PCIVirtualAdjust
- offsetof(struct TDNode
,
1249 CacheClearE(&donetd
->td_TD
, 16, CACRF_InvalidateD
);
1250 while (donetd
->td_TD
.NextTD
)
1253 (struct TDNode
*)((IPTR
) donetd
->td_TD
.NextTD
-
1254 hc
->hc_PCIVirtualAdjust
- offsetof(struct TDNode
,
1256 CacheClearE(&donetd
->td_TD
, 16, CACRF_InvalidateD
);
1258 WRITEMEM32_LE(&donetd
->td_TD
.NextTD
, hc
->hc_DoneQueue
);
1259 CacheClearE(&donetd
->td_TD
, 16, CACRF_ClearD
);
1262 ("Attached old DoneHead 0x%08lx to TD 0x%08lx\n",
1263 hc
->hc_DoneQueue
, donetd
->td_Self
));
1265 hc
->hc_DoneQueue
= donehead
;
1267 if (intr
& OISF_SOF
)
1269 /* Aborted EDs are available for freeing */
1270 hc
->hc_Flags
|= HCF_ABORT
;
1273 if (intr
& (OISF_SOF
| OISF_DONEHEAD
))
1276 * These two are leveraged down to SoftInt.
1277 * This is done in order to keep queues rotation synchronized.
1279 Cause(&hc
->hc_CompleteInt
);
1282 KPRINTF(1, ("Exiting IntCode(0x%p)\n", unit
));
1285 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTSTATUS
, intr
);
1287 /* Unlock interrupts */
1288 WRITEREG32_LE(&hc
->hc_RegBase
, OHCI_INTEN
, OISF_MASTERENABLE
);
1296 /* /// "AbortRequest()" */
/* Abort an in-flight request: stop the affected HC queue (control/bulk
 * per OHCI spec), move the request from its transfer queue to the abort
 * queue, record the data toggle, and arm the SOF interrupt so the ED
 * can be safely freed on the next frame.
 * NOTE(review): line-shattered extract — original lines 1298, 1301,
 * 1305-1306, 1312, 1316, 1321-1322, 1327-1329, 1331-1332, 1336-1342,
 * 1349, 1352, 1354-1355, 1358-1359, 1362 and 1364-1366 (braces, breaks
 * and some statements) are missing from this view. */
1297 void AbortRequest(struct PCIController
*hc
, struct IOUsbHWReq
*ioreq
)
1299 struct PCIUnit
*unit
= hc
->hc_Unit
;
1300 struct EDNode
*ed
= ioreq
->iouh_DriverPrivate1
;
/* endpoint key: (devaddr << 5) + endpoint, +0x10 for IN pipes */
1302 (ioreq
->iouh_DevAddr
<< 5) + ioreq
->iouh_Endpoint
+
1303 ((ioreq
->iouh_Dir
== UHDIR_IN
) ? 0x10 : 0);
1304 ULONG disablemask
= 0;
1307 KPRINTF(70, ("HC 0x%p Aborting request 0x%p, command %ld, "
1308 "endpoint 0x%04lx, Frame=%ld\n",
1309 hc
, ioreq
, ioreq
->iouh_Req
.io_Command
, target
,
1310 READREG32_LE(hc
->hc_RegBase
, OHCI_FRAMECOUNT
)));
1311 PrintED("Aborting", ed
, hc
);
1313 /* Removing control and bulk EDs requires to stop the appropriate HC
1314 * queue first (according to specification) */
1315 switch (ioreq
->iouh_Req
.io_Command
)
1317 case UHCMD_CONTROLXFER
:
1318 KPRINTF(50, ("Stopping control queue\n"));
1319 hc
->hc_Flags
|= HCF_STOP_CTRL
;
1320 disablemask
= OCSF_CTRLENABLE
;
1323 case UHCMD_BULKXFER
:
1324 KPRINTF(50, ("Stopping bulk queue\n"));
1325 hc
->hc_Flags
|= HCF_STOP_BULK
;
1326 disablemask
= OCSF_BULKENABLE
;
1330 /* Stop selected queue(s) */
1333 ctrlstatus
= READREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
);
1334 ctrlstatus
&= ~disablemask
;
1335 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
, ctrlstatus
);
1343 * ...and move to abort queue.
1344 * We can't reply the request right now because some of its TDs
1345 * can be used by the HC right now. This means it does something
1346 * to the data buffer referred to by the request.
1347 * We reply the request only when the HC stops doing this. Otherwise
1348 * we may end up in trashed memory.
1350 Remove(&ioreq
->iouh_Req
.io_Message
.mn_Node
);
1351 AddTail(&hc
->hc_AbortQueue
, &ioreq
->iouh_Req
.io_Message
.mn_Node
);
1353 if (ioreq
->iouh_Req
.io_Command
== UHCMD_INTXFER
)
/* remember the toggle state the endpoint was left at */
1356 unit
->hu_DevDataToggle
[target
] =
1357 (READMEM32_LE(&ed
->ed_ED
.HeadPtr
) & OEHF_DATA1
) ? TRUE
: FALSE
;
1360 * Request StartOfFrame interrupt. Upon next frame this ED
1361 * is guaranteed to be out of use and can be freed.
1363 EnableInt(hc
, OISF_SOF
);
1367 /* /// "InitController()" */
/*
 * InitController
 * Bring one OHCI host controller from its power-on/BIOS-owned state into
 * an operational state owned by this driver: allocate the shared DMA
 * memory (HCCA + ED/TD pools), take the controller away from a legacy
 * SMM BIOS, reset it, program the frame timing, install interrupt and
 * reset handlers, and finally switch it to USBOPERATIONAL.
 *
 * hc - controller instance being initialised
 * hu - owning PCI unit (provides the device/library node)
 * Returns TRUE on success, FALSE on failure (per the trailing KPRINTFs;
 * the actual return statements fall in lines not visible in this chunk).
 *
 * NOTE(review): this chunk is an extraction with gaps - several original
 * source lines (variable declarations, braces, delay/wait calls, loop
 * bodies) are missing between the visible statements. Comments below
 * describe only what the visible statements demonstrably do.
 */
1368 BOOL
InitController(struct PCIController
*hc
, struct PCIUnit
*hu
)
1371 struct PCIDevice
*hd
= hu
->hu_Device
;
// Tag lists used below to toggle PCI memory access and busmastering.
1387 struct TagItem pciActivateMem
[] = {
1388 {aHidd_PCIDevice_isMEM
, TRUE
},
1392 struct TagItem pciActivateBusmaster
[] = {
1393 {aHidd_PCIDevice_isMaster
, TRUE
},
1397 struct TagItem pciDeactivateBusmaster
[] = {
1398 {aHidd_PCIDevice_isMaster
, FALSE
},
// Software interrupt used to complete finished transfers outside of
// hard interrupt context.
1402 hc
->hc_CompleteInt
.is_Node
.ln_Type
= NT_INTERRUPT
;
1403 hc
->hc_CompleteInt
.is_Node
.ln_Name
= "OHCI CompleteInt";
1404 hc
->hc_CompleteInt
.is_Node
.ln_Pri
= 0;
1405 hc
->hc_CompleteInt
.is_Data
= hc
;
1406 hc
->hc_CompleteInt
.is_Code
= (VOID_FUNC
) CompleteInt
;
// Size one contiguous DMA-able allocation: HCCA (plus alignment slack)
// followed by the ED pool and the TD pool.
1408 hc
->hc_PCIMemSize
= OHCI_HCCA_SIZE
+ OHCI_HCCA_ALIGNMENT
+ 1;
1409 hc
->hc_PCIMemSize
+= sizeof(struct EDNode
) * OHCI_ED_POOLSIZE
;
1410 hc
->hc_PCIMemSize
+= sizeof(struct TDNode
) * OHCI_TD_POOLSIZE
;
// Allocate the block through the PCI driver so the HC can DMA into it.
1413 HIDD_PCIDriver_AllocPCIMem(hc
->hc_PCIDriverObject
,
1415 hc
->hc_PCIMem
= (APTR
) memptr
;
1418 // PhysicalAddress - VirtualAdjust = VirtualAddress
1419 // VirtualAddress + VirtualAdjust = PhysicalAddress
1420 hc
->hc_PCIVirtualAdjust
=
1421 pciGetPhysical(hc
, memptr
) - (APTR
) memptr
;
1422 KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc
->hc_PCIVirtualAdjust
));
// Carve the HCCA out of the block at the required alignment boundary.
1426 (UBYTE
*) (((IPTR
) hc
->hc_PCIMem
+
1427 OHCI_HCCA_ALIGNMENT
) & (~OHCI_HCCA_ALIGNMENT
));
1428 hc
->hc_HCCA
= (struct HCCA
*)memptr
;
1429 KPRINTF(10, ("HCCA 0x%p\n", hc
->hc_HCCA
));
1430 memptr
+= OHCI_HCCA_SIZE
;
// Build the free ED pool; each ed_Self caches the ED's physical (bus)
// address of its hardware portion (EPCaps onwards), little-endian.
1433 NewList((struct List
*)&hc
->hc_FreeEDList
);
1434 ed
= (struct EDNode
*)memptr
;
1435 cnt
= OHCI_ED_POOLSIZE
;
1438 // minimal initialization
1439 AddTail((struct List
*)&hc
->hc_FreeEDList
, (struct Node
*)ed
);
1440 NewList((struct List
*)&ed
->ed_TDList
);
1441 WRITEMEM32_LE(&ed
->ed_Self
,
1442 (IPTR
) (&ed
->ed_ED
.EPCaps
) + hc
->hc_PCIVirtualAdjust
);
1446 memptr
+= sizeof(struct EDNode
) * OHCI_ED_POOLSIZE
;
// Build the free TD pool the same way; td_Self is the bus address of
// the TD's hardware portion (Ctrl onwards).
1449 NewList((struct List
*)&hc
->hc_FreeTDList
);
1450 td
= (struct TDNode
*)memptr
;
1451 cnt
= OHCI_TD_POOLSIZE
- 1;
1454 AddTail((struct List
*)&hc
->hc_FreeTDList
, (struct Node
*)td
);
1455 WRITEMEM32_LE(&td
->td_Self
,
1456 (IPTR
) (&td
->td_TD
.Ctrl
) + hc
->hc_PCIVirtualAdjust
);
// NOTE(review): td_Self is written a second time here - presumably for
// the final pool TD that the loop above (cnt = OHCI_TD_POOLSIZE - 1)
// left out; the intervening loop/brace lines are not visible in this
// extraction, so confirm against the full source.
1460 WRITEMEM32_LE(&td
->td_Self
,
1461 (IPTR
) (&td
->td_TD
.Ctrl
) + hc
->hc_PCIVirtualAdjust
);
1462 memptr
+= sizeof(struct TDNode
) * OHCI_TD_POOLSIZE
;
// Dedicated terminator TD: taken from the pool and unlinked so it can
// sit permanently at the tail of ED TD queues.
1465 hc
->hc_TermTD
= td
= AllocTD(hc
);
1466 td
->td_Node
.mln_Succ
= NULL
;
1467 td
->td_Node
.mln_Pred
= NULL
;
1468 td
->td_TD
.NextTD
= 0;
// Dedicated terminator ED: SKIP flag set so the HC never processes it;
// used as the placeholder entry for empty interrupt slots below.
1471 hc
->hc_TermED
= ed
= AllocED(hc
);
1472 ed
->ed_Node
.mln_Succ
= NULL
;
1473 ed
->ed_Node
.mln_Pred
= NULL
;
1474 CONSTWRITEMEM32_LE(&ed
->ed_ED
.EPCaps
, OECF_SKIP
);
1475 ed
->ed_ED
.NextED
= 0L;
// Empty software queue heads, one per transfer type / interrupt level.
1477 for (cnt
= 0; cnt
< XFER_COUNT
+ INT_LIST_COUNT
- 1; cnt
++)
1478 NewList((struct List
*)&hc
->hc_EDLists
[cnt
]);
1482 // fill in framelist with IntED entry points based on interval
// All 32 HCCA interrupt heads start out pointing at the terminator ED
// (nothing scheduled yet).
1483 tabptr
= hc
->hc_HCCA
->ha_IntEDs
;
1484 for (cnt
= 0; cnt
< 32; cnt
++)
1486 *tabptr
++ = hc
->hc_TermED
->ed_Self
;
1489 // time to initialize hardware...
// Operational registers live at BAR0; mask off the low 4 flag bits to
// get the memory-mapped register base.
1490 OOP_GetAttr(hc
->hc_PCIDeviceObject
, aHidd_PCIDevice_Base0
,
1491 (IPTR
*) & hc
->hc_RegBase
);
1492 hc
->hc_RegBase
= (APTR
) (((IPTR
) hc
->hc_RegBase
) & (~0xf));
1493 KPRINTF(10, ("RegBase = 0x%p\n", hc
->hc_RegBase
));
// Enable PCI memory-space decoding before touching the registers.
1496 OOP_SetAttrs(hc
->hc_PCIDeviceObject
, (struct TagItem
*)pciActivateMem
);
// Number of root-hub ports comes from HcRhDescriptorA.
1498 hubdesca
= READREG32_LE(hc
->hc_RegBase
, OHCI_HUBDESCA
);
1499 hc
->hc_NumPorts
= (hubdesca
& OHAM_NUMPORTS
) >> OHAS_NUMPORTS
;
1500 KPRINTF(20, ("Found OHCI Controller %p FuncNum = %ld, Rev %02lx, "
1502 hc
->hc_PCIDeviceObject
, hc
->hc_FunctionNum
,
1503 READREG32_LE(hc
->hc_RegBase
, OHCI_REVISION
) & 0xFF,
1506 KPRINTF(20, ("Powerswitching: %s %s\n",
1507 hubdesca
& OHAF_NOPOWERSWITCH
? "Always on" : "Available",
1508 hubdesca
& OHAF_INDIVIDUALPS
? "per port" : "global"));
1510 control
= READREG32_LE(hc
->hc_RegBase
, OHCI_CONTROL
);
1511 KPRINTF(10, ("OHCI control state: 0x%08lx\n", control
))
;
1513 // disable BIOS legacy support
// If an SMM BIOS still owns the HC (InterruptRouting set), request an
// ownership change and wait for the BIOS to clear the bit; if it never
// does, clear it forcibly.
1514 if (control
& OCLF_SMIINT
)
1517 ("BIOS still has hands on OHCI, trying to get rid of it\n"));
1519 cmdstatus
= READREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
);
1520 cmdstatus
|= OCSF_OWNERCHANGEREQ
;
1521 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
, cmdstatus
);
1525 control
= READREG32_LE(hc
->hc_RegBase
, OHCI_CONTROL
);
1526 if (!(control
& OCLF_SMIINT
))
1528 KPRINTF(10, ("BIOS gave up on OHCI. Pwned!\n"));
1537 ("BIOS didn't release OHCI. Forcing and praying...\n"));
1538 control
&= ~OCLF_SMIINT
;
1539 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_CONTROL
, control
);
// Keep busmastering off across the reset so the HC cannot DMA yet.
1543 OOP_SetAttrs(hc
->hc_PCIDeviceObject
,
1544 (struct TagItem
*)pciDeactivateBusmaster
); // no busmaster yet
// Software reset; HCR is polled below until the HC clears it
// (the timeout counts down from 100 per the final KPRINTF).
1546 KPRINTF(10, ("Resetting OHCI HC\n"));
1547 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
, OCSF_HCRESET
);
1551 if (!(READREG32_LE(hc
->hc_RegBase
,
1552 OHCI_CMDSTATUS
) & OCSF_HCRESET
))
1563 KPRINTF(20, ("Reset Timeout!\n"));
1567 KPRINTF(20, ("Reset finished after %ld ticks\n", 100 - cnt
));
// DMA is needed from here on.
1571 OOP_SetAttrs(hc
->hc_PCIDeviceObject
,
1572 (struct TagItem
*)pciActivateBusmaster
); // enable busmaster
1574 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_FRAMECOUNT
, 0);
1575 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_PERIODICSTART
, 10800);
// Rewrite HcFmInterval with the default bits-per-frame value; the
// FIT toggle bit must be flipped for the write to take effect.
1577 frameival
= READREG32_LE(hc
->hc_RegBase
, OHCI_FRAMEINTERVAL
);
1578 KPRINTF(10, ("FrameInterval=%08lx\n", frameival
));
1579 frameival
&= ~OIVM_BITSPERFRAME
;
1580 frameival
|= OHCI_DEF_BITSPERFRAME
<< OIVS_BITSPERFRAME
;
1581 frameival
|= OIVF_TOGGLE
;
1582 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_FRAMEINTERVAL
, frameival
);
1584 // make sure nothing is running
// Clear every list-head/current-ED register so the HC has no stale
// schedule pointers before it is started.
1585 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_PERIODIC_ED
, 0);
1586 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CTRL_HEAD_ED
, 0);
1587 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CTRL_ED
, 0);
1588 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_BULK_HEAD_ED
, 0);
1589 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_BULK_ED
, 0);
1590 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_DONEHEAD
, 0);
// Flush the HCCA for DMA and hand its physical address to the HC.
1592 dma_size
= sizeof(struct HCCA
);
1594 (ULONG
) (IPTR
) CachePreDMA(hc
->hc_HCCA
, &dma_size
,
1596 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_HCCA
, phy_addr
);
// Acknowledge and mask all HC interrupt sources before hooking the IRQ.
1598 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTSTATUS
, OISF_ALL_INTS
);
1599 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTDIS
, OISF_ALL_INTS
);
1602 // install reset handler
1603 hc
->hc_ResetInt
.is_Code
= (VOID_FUNC
) ResetHandler
;
1604 hc
->hc_ResetInt
.is_Data
= hc
;
1605 AddResetCallback(&hc
->hc_ResetInt
);
// Install the PCI interrupt server for this controller's IRQ line.
1608 hc
->hc_PCIIntHandler
.is_Node
.ln_Name
=
1609 hu
->hu_Device
->hd_Library
.lib_Node
.ln_Name
;
1610 hc
->hc_PCIIntHandler
.is_Node
.ln_Pri
= 5;
1611 hc
->hc_PCIIntHandler
.is_Code
= (VOID_FUNC
) IntCode
;
1612 hc
->hc_PCIIntHandler
.is_Data
= hc
;
1613 AddIntServer(INTB_KERNEL
+ hc
->hc_PCIIntLine
,
1614 &hc
->hc_PCIIntHandler
);
// Enable only the interrupt causes this driver services, plus MIE.
1616 hc
->hc_PCIIntEnMask
=
1617 OISF_DONEHEAD
| OISF_RESUMEDTX
| OISF_HOSTERROR
|
1618 OISF_FRAMECOUNTOVER
| OISF_HUBCHANGE
;
1620 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTEN
,
1621 hc
->hc_PCIIntEnMask
| OISF_MASTERENABLE
);
1623 /* Reset controller twice (needed for some OHCI chips) */
1624 for (i
= 0; i
< 2; i
++)
1626 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CONTROL
,
1627 OCLF_PERIODICENABLE
| OCLF_CTRLENABLE
| OCLF_BULKENABLE
|
1628 OCLF_ISOENABLE
| OCLF_USBRESET
);
// Reset clobbers HcFmInterval; restore the value programmed above.
1630 KPRINTF(10, ("POST-RESET FrameInterval=%08lx\n",
1631 READREG32_LE(hc
->hc_RegBase
, OHCI_FRAMEINTERVAL
)));
1632 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_FRAMEINTERVAL
, frameival
);
1635 // make sure the ports are on with chipset quirk workaround
// NOTE(review): forcing "no power switching" and clearing per-port
// power switching deliberately ignores the descriptor's advertised
// capabilities - a chipset quirk workaround per the comment above.
1636 hubdesca
= READREG32_LE(hc
->hc_RegBase
, OHCI_HUBDESCA
);
1637 hubdesca
|= OHAF_NOPOWERSWITCH
;
1638 hubdesca
&= ~OHAF_INDIVIDUALPS
;
1639 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_HUBDESCA
, hubdesca
);
// Globally power the root hub ports.
1641 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_HUBSTATUS
, OHSF_POWERHUB
);
1644 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_HUBDESCA
, hubdesca
);
// Finally switch the HC to USBOPERATIONAL with all list processing
// (periodic, control, bulk, iso) enabled.
1646 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CONTROL
,
1647 OCLF_PERIODICENABLE
| OCLF_CTRLENABLE
| OCLF_BULKENABLE
|
1648 OCLF_ISOENABLE
| OCLF_USBOPER
);
1651 KPRINTF(20, ("Init returns TRUE...\n"));
1655 KPRINTF(1000, ("Init returns FALSE...\n"));
1660 /* /// "FreeController()" */
/*
 * FreeController
 * Shut down every OHCI controller attached to the given PCI unit:
 * mask and acknowledge all interrupts, power down the root-hub ports,
 * stop list processing, and issue a final host-controller reset.
 * Walks hu->hu_Controllers; the hc parameter is overwritten immediately,
 * so its incoming value is unused.
 *
 * NOTE(review): extraction gaps - loop braces and any delay/wait calls
 * between the visible register writes are not shown in this chunk.
 */
1661 void FreeController(struct PCIController
*hc
, struct PCIUnit
*hu
)
// Iterate over all controllers of this unit (exec list walk; loop ends
// when ln_Succ is NULL at the list tail).
1664 hc
= (struct PCIController
*)hu
->hu_Controllers
.lh_Head
;
1665 while (hc
->hc_Node
.ln_Succ
)
1667 KPRINTF(20, ("Shutting down OHCI %p\n", hc
));
// Mask and acknowledge every HC interrupt source first so no further
// IRQs are raised during teardown.
1668 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTDIS
, OISF_ALL_INTS
);
1669 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_INTSTATUS
, OISF_ALL_INTS
);
1671 // disable all ports
1672 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_HUBDESCB
, 0);
1673 WRITEREG32_LE(hc
->hc_RegBase
, OHCI_HUBSTATUS
, OHSF_UNPOWERHUB
);
// Stop all schedule processing by clearing control/command status.
1676 KPRINTF(20, ("Stopping OHCI %p\n", hc
));
1677 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CONTROL
, 0);
1678 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
, 0);
1681 //KPRINTF(20, ("Reset done OHCI %08lx\n", hc));
// Final software reset leaves the HC in a quiescent (USBSUSPEND) state
// for the next owner.
1683 KPRINTF(20, ("Resetting OHCI %p\n", hc
));
1684 CONSTWRITEREG32_LE(hc
->hc_RegBase
, OHCI_CMDSTATUS
, OCSF_HCRESET
);
1688 KPRINTF(20, ("Shutting down OHCI done.\n"));
// Advance to the next controller in the unit's list.
1690 hc
= (struct PCIController
*)hc
->hc_Node
.ln_Succ
;
1695 /* /// "TranslatePortFlags()" */
1696 UWORD
TranslatePortFlags(ULONG flags
, ULONG mask
)
1698 UWORD new_flags
= 0;
1702 if (flags
& OHPF_PORTPOWER
)
1703 new_flags
|= UPSF_PORT_POWER
;
1704 if (flags
& OHPF_OVERCURRENT
)
1705 new_flags
|= UPSF_PORT_OVER_CURRENT
;
1706 if (flags
& OHPF_PORTCONNECTED
)
1707 new_flags
|= UPSF_PORT_CONNECTION
;
1708 if (flags
& OHPF_PORTENABLE
)
1709 new_flags
|= UPSF_PORT_ENABLE
;
1710 if (flags
& OHPF_LOWSPEED
)
1711 new_flags
|= UPSF_PORT_LOW_SPEED
;
1712 if (flags
& OHPF_PORTRESET
)
1713 new_flags
|= UPSF_PORT_RESET
;
1714 if (flags
& OHPF_PORTSUSPEND
)
1715 new_flags
|= UPSF_PORT_SUSPEND
;
1716 if (flags
& OHPF_OVERCURRENTCHG
)
1717 new_flags
|= UPSF_PORT_OVER_CURRENT
;
1718 if (flags
& OHPF_RESETCHANGE
)
1719 new_flags
|= UPSF_PORT_RESET
;
1720 if (flags
& OHPF_ENABLECHANGE
)
1721 new_flags
|= UPSF_PORT_ENABLE
;
1722 if (flags
& OHPF_CONNECTCHANGE
)
1723 new_flags
|= UPSF_PORT_CONNECTION
;
1724 if (flags
& OHPF_RESUMEDTX
)
1725 new_flags
|= UPSF_PORT_SUSPEND
;