/*
    Copyright © 2002-2009, Chris Hodges. All rights reserved.
    Copyright © 2009-2012, The AROS Development Team. All rights reserved.
    $Id$
*/
#include <devices/usb_hub.h>
#include <hidd/pci.h>

#include <proto/exec.h>
#include <clib/alib_protos.h>
#include <proto/oop.h>

#include "debug.h"
#include "chip.h"
#include "pci.h"

#include "chip_protos.h"
#include "roothub_protos.h"
#include "buffer_protos.h"
#include "cmd_protos.h"
#include "pci_protos.h"
#undef HiddPCIDeviceAttrBase
#define HiddPCIDeviceAttrBase (hd->hd_HiddPCIDeviceAB)
#undef HiddAttrBase
#define HiddAttrBase (hd->hd_HiddAB)
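
/*
 * The two tables below are indexed by transfer type (CTRL_XFER, BULK_XFER
 * and so on, as defined in the headers). Only the control and bulk queues
 * have a command-status enable bit and a current-ED register; the zero
 * entries are presumably placeholders for the interrupt and isochronous
 * queues, which the HC walks from the HCCA interrupt table instead.
 */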
ULONG start_masks[] = {OCSF_CTRLENABLE, OCSF_BULKENABLE, 0UL, 0UL};
ULONG current_ed_regs[] = {OHCI_CTRL_ED, OHCI_BULK_ED, 0UL, 0UL};
static ULONG ScheduleED(struct PCIController *hc, UWORD xfer_type,
    struct IOUsbHWReq *ioreq);
static ULONG FillED(struct PCIController *hc, struct EDNode *ed,
    UWORD xfer_type, struct IOUsbHWReq *ioreq, UWORD dir);
/* /// "AddHeadED()" */
static void AddHeadED(ULONG *list, struct EDNode *ed)
{
    ed->ed_ED.NextED = *list;
    *list = ed->ed_Self;
    CacheClearE(&ed->ed_ED.EPCaps, 16, CACRF_ClearD);
    CacheClearE(list, 4, CACRF_ClearD);
}
/* \\\ */
/* /// "AllocED()" */
static struct EDNode *AllocED(struct PCIController *hc)
{
    struct EDNode *ed =
        (struct EDNode *)RemHead((struct List *)&hc->hc_FreeEDList);

    if (ed != NULL)
    {
        ed->ed_ED.HeadPtr = 0UL;
        ed->ed_ED.TailPtr = hc->hc_TermTD->td_Self;
    }
    if (ed == NULL)
        KPRINTF(20, ("Out of EDs!\n"));

    return ed;
}
/* \\\ */
/* /// "FreeED()" */
static void FreeED(struct PCIController *hc, struct EDNode *ed)
{
    CONSTWRITEMEM32_LE(&ed->ed_ED.EPCaps, OECF_SKIP);
    SYNC;

    ed->ed_IOReq = NULL;
    ed->ed_Buffer = NULL;
    ed->ed_SetupData = NULL;
    AddTail((struct List *)&hc->hc_FreeEDList, (struct Node *)ed);
    ed->ed_ED.HeadPtr = ed->ed_ED.TailPtr = 0UL;
}
/* \\\ */
/* /// "AllocTD()" */
static struct TDNode *AllocTD(struct PCIController *hc)
{
    struct TDNode *td =
        (struct TDNode *)RemHead((struct List *)&hc->hc_FreeTDList);

    if (td == NULL)
        KPRINTF(20, ("Out of TDs!\n"));

    return td;
}
/* \\\ */
/* /// "FreeTD()" */
static void FreeTD(struct PCIController *hc, struct TDNode *td)
{
    td->td_TD.NextTD = 0UL;
    SYNC;

    td->td_ED = NULL;
    AddTail((struct List *)&hc->hc_FreeTDList, (struct Node *)td);
}
/* \\\ */
/* /// "DisableED()" */
/* Note: does not work on EDs in the interrupt tree */
static void DisableED(struct EDNode *ed)
{
    ULONG ctrlstatus, succ_ed_phy, dma_size;
    struct EDNode *pred_ed, *succ_ed;

    // disable ED
    ctrlstatus = READMEM32_LE(&ed->ed_ED.EPCaps);
    ctrlstatus |= OECF_SKIP;
    WRITEMEM32_LE(&ed->ed_ED.EPCaps, ctrlstatus);

    // unlink from schedule
    succ_ed = (struct EDNode *)ed->ed_Node.mln_Succ;
    pred_ed = (struct EDNode *)ed->ed_Node.mln_Pred;
    if (succ_ed->ed_Node.mln_Succ != NULL)
        succ_ed_phy = succ_ed->ed_Self;
    else
        succ_ed_phy = 0L;
    if (pred_ed->ed_Node.mln_Pred != NULL)
        pred_ed->ed_ED.NextED = succ_ed_phy;

    Remove((struct Node *)ed);
    ed->ed_IOReq = NULL;
    dma_size = sizeof(struct EndpointDescriptor);
    CachePreDMA(&ed->ed_ED, &dma_size, 0);
    SYNC;
}
/* \\\ */
/* /// "DisableInt()" */
static void DisableInt(struct PCIController *hc, ULONG mask)
{
    WRITEREG32_LE(hc->hc_RegBase, OHCI_INTDIS, mask);
    hc->hc_PCIIntEnMask &= ~mask;
}
/* \\\ */
/* /// "EnableInt()" */
static void EnableInt(struct PCIController *hc, ULONG mask)
{
    WRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, mask);
    hc->hc_PCIIntEnMask |= mask;
    WRITEREG32_LE(hc->hc_RegBase, OHCI_INTEN, mask);
}
/* \\\ */
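
/*
 * Note on EnableInt() above: writing the mask to HcInterruptStatus first
 * acks any stale occurrence of that interrupt (OHCI status bits are
 * write-1-to-clear), so setting the bit in HcInterruptEnable afterwards
 * does not immediately retrigger on a leftover event.
 */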
#ifdef DEBUG_TD

/* /// "PrintTD()" */
static void PrintTD(const char *txt, ULONG ptd, struct PCIController *hc)
{
    KPrintF("HC 0x%p %s TD list:", hc, txt);

    while (ptd)
    {
        struct TDNode *td =
            (struct TDNode *)((IPTR) ptd - hc->hc_PCIVirtualAdjust -
            offsetof(struct TDNode, td_TD.Ctrl));

        KPrintF(" 0x%p", td);
        ptd = READMEM32_LE(&td->td_TD.NextTD);
    }
    RawPutChar('\n');
}
/* \\\ */

#else
#define PrintTD(txt, ptd, hc)
#endif
#ifdef DEBUG_ED

/* /// "PrintED()" */
static void PrintED(const char *txt, struct EDNode *ed,
    struct PCIController *hc)
{
    struct TDNode *td;

    KPrintF
        ("%s ED 0x%p: EPCaps=%08lx, HeadPtr=%08lx, TailPtr=%08lx,"
        " NextED=%08lx\n",
        txt, ed, READMEM32_LE(&ed->ed_ED.EPCaps),
        READMEM32_LE(&ed->ed_ED.HeadPtr), READMEM32_LE(&ed->ed_ED.TailPtr),
        READMEM32_LE(&ed->ed_ED.NextED));

    KPrintF("...TD list:\n");
    for (td = (struct TDNode *)ed->ed_TDList.mlh_Head; td->td_Node.mln_Succ;
        td = (struct TDNode *)td->td_Node.mln_Succ)
        KPrintF
            ("TD 0x%p: td_TD.Ctrl=%lx BufferPtr=%lx NextTD=%lx"
            " BufferEnd=%lx\n",
            td, td->td_TD.Ctrl, td->td_TD.BufferPtr, td->td_TD.NextTD,
            td->td_TD.BufferEnd);
}
/* \\\ */

#else
#define PrintED(txt, ed, hc)
#endif
/* /// "ResetHandler()" */
static AROS_INTH1(ResetHandler, struct PCIController *, hc)
{
    AROS_INTFUNC_INIT

    // reset controller
    CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, OCSF_HCRESET);

    return FALSE;

    AROS_INTFUNC_EXIT
}
/* \\\ */
/* /// "AddTailTD()" */
static void AddTailTD(struct EDNode *ed, struct TDNode *td)
{
    struct TDNode *old_tail_td = NULL;
    ULONG dma_size, td_phy;

    if ((ed->ed_ED.HeadPtr & OHCI_PTRMASK) != 0UL)
        old_tail_td = (struct TDNode *)ed->ed_TDList.mlh_TailPred;

    td->td_TD.NextTD = ed->ed_ED.TailPtr;
    td->td_ED = ed;

    dma_size = sizeof(struct TransferDescriptor);
    td_phy = (ULONG) (IPTR) CachePreDMA(&td->td_TD, &dma_size, 0);

    if (old_tail_td != NULL)
    {
        old_tail_td->td_TD.NextTD = td_phy;
        dma_size = sizeof(struct TransferDescriptor);
        CachePreDMA(&old_tail_td->td_TD, &dma_size, 0);
    }
    else
    {
        ed->ed_ED.HeadPtr |= td->td_Self;
        dma_size = sizeof(struct EndpointDescriptor);
        CachePreDMA(&ed->ed_ED, &dma_size, 0);
    }
}
/* \\\ */
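
/*
 * AddTailTD() keeps the ED's hardware view consistent: the new TD always
 * points at the shared terminator TD, and either the previous tail TD or
 * (for an ED whose HeadPtr has no TD bits set, i.e. an empty one) the
 * ED's HeadPtr is patched to the new TD's physical address. The
 * CachePreDMA() calls flush the descriptors so the host controller sees
 * the updated links.
 */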
/* /// "FreeTDChain()" */
static void FreeTDChain(struct PCIController *hc, struct MinList *tdlist)
{
    struct TDNode *td;

    while ((td = (struct TDNode *)RemHead((struct List *)tdlist)) != NULL)
    {
        KPRINTF(1, ("FreeTD %p\n", td));
        FreeTD(hc, td);
    }
}
/* \\\ */
/* /// "FreeEDContext()" */
static void FreeEDContext(struct PCIController *hc, struct EDNode *ed,
    struct IOUsbHWReq *ioreq)
{
    UWORD dir;

    KPRINTF(5, ("Freeing EDContext 0x%p IOReq 0x%p\n", ed, ioreq));

    if (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER)
        dir = (ioreq->iouh_SetupData.bmRequestType & URTF_IN)
            ? UHDIR_IN : UHDIR_OUT;
    else
        dir = ioreq->iouh_Dir;

    usbReleaseBuffer(ed->ed_Buffer, ioreq->iouh_Data, ioreq->iouh_Actual,
        dir);
    usbReleaseBuffer(ed->ed_SetupData, &ioreq->iouh_SetupData, 8, UHDIR_IN);

    Disable();
    FreeTDChain(hc, &ed->ed_TDList);
    FreeED(hc, ed);
    Enable();
}
/* \\\ */
/* /// "UpdateIntTree()" */
static void UpdateIntTree(struct PCIController *hc)
{
    struct EDNode *ed;
    UWORD i, j, k, l;
    ULONG *queue_heads = hc->hc_HCCA->ha_IntEDs;

    // initialise every queue head to point at the terminal ED by default
    for (i = 0; i < 32; i++)
    {
        queue_heads[i] = hc->hc_TermED->ed_Self;
    }

    // put each ED in the right number of queues for its interval level.
    // we balance the tree by incrementing the slot we start at for each ED
    for (i = 0; i < INT_LIST_COUNT; i++)
    {
        ed = (struct EDNode *)hc->hc_EDLists[INT_XFER + i].mlh_Head;
        for (j = 0; ed->ed_Node.mln_Succ != NULL; j++)
        {
            for (k = 0, l = j; k < 1 << (INT_LIST_COUNT - i - 1); k++)
            {
                AddHeadED(&queue_heads[l % 32], ed);
                l += 1 << i;
            }
            ed = (struct EDNode *)ed->ed_Node.mln_Succ;
        }
    }
}
/* \\\ */
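
/*
 * A sketch of the interval tree arithmetic above, assuming
 * INT_LIST_COUNT == 6 (logical lists for 1, 2, 4, 8, 16 and 32 ms polling
 * intervals): an ED on level i is added to 2^(5 - i) of the 32 HCCA slots,
 * spaced 2^i slots apart, so the HC reaches it once every 2^i frames.
 * E.g. an 8 ms ED (i = 3) occupies four slots: l, l+8, l+16, l+24
 * (mod 32). The starting slot l advances per ED, which spreads the EDs of
 * one level evenly across the frame schedule.
 */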
/* /// "HandleFinishedTDs()" */
static void HandleFinishedTDs(struct PCIController *hc)
{
    struct IOUsbHWReq *ioreq;
    struct IOUsbHWReq *nextioreq;
    struct EDNode *ed = NULL;
    struct TDNode *td, *nexttd;
    ULONG len;
    ULONG ctrlstatus;
    ULONG epcaps;
    UWORD target;
    BOOL direction_in;
    BOOL updatetree = FALSE;
    ULONG donehead, ptr;
    BOOL retire;
    ULONG oldenables;
    struct PCIUnit *unit = hc->hc_Unit;
    ULONG dma_size;

    KPRINTF(100, ("Checking for work done...\n"));
    Disable();
    donehead = hc->hc_DoneQueue;
    hc->hc_DoneQueue = 0UL;
    Enable();
    if (!donehead)
    {
        KPRINTF(1, ("Nothing to do!\n"));
        return;
    }
    td = (struct TDNode *)((IPTR) donehead - hc->hc_PCIVirtualAdjust -
        offsetof(struct TDNode, td_TD.Ctrl));
    KPRINTF(100, ("DoneHead=%08lx, TD=%p, Frame=%ld\n", donehead, td,
        READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
    PrintTD("Done", donehead, hc);

    do
    {
        dma_size = sizeof(struct TransferDescriptor);
        CachePostDMA(&td->td_TD, &dma_size, 0);
        ed = td->td_ED;
        if (!ed)
        {
            KPRINTF(1000,
                ("Came across a rogue TD 0x%p that already has been freed!\n",
                td));
            ptr = READMEM32_LE(&td->td_TD.NextTD) & OHCI_PTRMASK;
            if (!ptr)
                break;
            td = (struct TDNode *)((IPTR) ptr - hc->hc_PCIVirtualAdjust -
                offsetof(struct TDNode, td_TD.Ctrl));
            continue;
        }
        dma_size = sizeof(struct EndpointDescriptor);
        CachePostDMA(&ed->ed_ED, &dma_size, 0);

        ctrlstatus = READMEM32_LE(&td->td_TD.Ctrl);
        KPRINTF(100, ("TD: %08lx - %08lx\n",
            READMEM32_LE(&td->td_TD.BufferPtr),
            READMEM32_LE(&td->td_TD.BufferEnd)));
        if (td->td_TD.BufferPtr)
        {
            // FIXME: this will blow up if physical memory is ever going to
            // be discontinuous
            len = READMEM32_LE(&td->td_TD.BufferPtr) -
                (READMEM32_LE(&td->td_TD.BufferEnd) + 1 - td->td_Length);
        }
        else
        {
            len = td->td_Length;
        }

        ioreq = ed->ed_IOReq;

        KPRINTF(100,
            ("Examining TD %p for ED %p (IOReq=%p), Status %08lx, len=%ld\n",
            td, ed, ioreq, ctrlstatus, len));
        if (!ioreq)
        {
            /* You should never see this (very weird inconsistency), but who
             * knows... */
            KPRINTF(1000,
                ("Came across a rogue ED 0x%p that already has been replied! "
                "TD 0x%p,\n",
                ed, td));
            ptr = READMEM32_LE(&td->td_TD.NextTD) & OHCI_PTRMASK;
            if (!ptr)
                break;
            td = (struct TDNode *)((IPTR) ptr - hc->hc_PCIVirtualAdjust -
                offsetof(struct TDNode, td_TD.Ctrl));
            continue;
        }

        if (len)
        {
            epcaps = READMEM32_LE(&ed->ed_ED.EPCaps);
            direction_in = ((epcaps & OECM_DIRECTION) == OECF_DIRECTION_TD)
                ? (ioreq->iouh_SetupData.bmRequestType & URTF_IN)
                : (epcaps & OECF_DIRECTION_IN);
            // FIXME: CachePostDMA() should be passed a virtual pointer
            CachePostDMA((APTR) (IPTR) READMEM32_LE(&td->td_TD.BufferEnd) -
                len + 1, &len, direction_in ? 0 : DMA_ReadFromRAM);
        }

        ioreq->iouh_Actual += len;
        retire = TRUE;
        switch (ctrlstatus & OTCM_COMPLETIONCODE)
        {
        case OTCF_CC_CRCERROR:
        case OTCF_CC_BABBLE:
        case OTCF_CC_PIDCORRUPT:
        case OTCF_CC_WRONGPID:
            ioreq->iouh_Req.io_Error = UHIOERR_CRCERROR;
            break;
        case OTCF_CC_STALL:
            ioreq->iouh_Req.io_Error = UHIOERR_STALL;
            break;
        case OTCF_CC_TIMEOUT:
            ioreq->iouh_Req.io_Error = UHIOERR_TIMEOUT;
            break;
        case OTCF_CC_OVERFLOW:
            ioreq->iouh_Req.io_Error = UHIOERR_OVERFLOW;
            break;
        case OTCF_CC_SHORTPKT:
            if ((!ioreq->iouh_Req.io_Error)
                && (!(ioreq->iouh_Flags & UHFF_ALLOWRUNTPKTS)))
            {
                ioreq->iouh_Req.io_Error = UHIOERR_RUNTPACKET;
            }
            break;
        case OTCF_CC_OVERRUN:
        case OTCF_CC_UNDERRUN:
            ioreq->iouh_Req.io_Error = UHIOERR_HOSTERROR;
            break;
        case OTCF_CC_NOERROR:
        case OTCF_CC_WRONGTOGGLE:
        case OTCF_CC_INVALID:
        default:
            retire = FALSE;
            break;
        }
        if (retire)
            KPRINTF(200, ("Bad completion code: %d\n",
                (ctrlstatus & OTCM_COMPLETIONCODE) >>
                OTCS_COMPLETIONCODE));
        if ((ctrlstatus & OTCM_DELAYINT) != OTCF_NOINT)
        {
            KPRINTF(10, ("TD 0x%p Terminator detected\n", td));
            retire = TRUE;
        }
        if (READMEM32_LE(&ed->ed_ED.HeadPtr) & OEHF_HALTED)
        {
            KPRINTF(100, ("ED halted!\n"));
            retire = TRUE;
        }

        if (retire)
        {
            KPRINTF(50, ("ED 0x%p stopped at TD 0x%p\n", ed, td));
            Remove(&ioreq->iouh_Req.io_Message.mn_Node);
            AddHead(&hc->hc_RetireQueue,
                &ioreq->iouh_Req.io_Message.mn_Node);
        }

        ptr = READMEM32_LE(&td->td_TD.NextTD) & OHCI_PTRMASK;
        KPRINTF(1, ("NextTD=0x%08lx\n", ptr));
        if (!ptr)
            break;
        td = (struct TDNode *)((IPTR) ptr - hc->hc_PCIVirtualAdjust -
            offsetof(struct TDNode, td_TD.Ctrl));
        KPRINTF(1, ("NextTD = %p\n", td));
    }
    while (TRUE);

    ioreq = (struct IOUsbHWReq *)hc->hc_RetireQueue.lh_Head;
    while ((nextioreq =
        (struct IOUsbHWReq *)((struct Node *)ioreq)->ln_Succ))
    {
        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ed = (struct EDNode *)ioreq->iouh_DriverPrivate1;
        if (ed)
        {
            KPRINTF(50,
                ("HC 0x%p Retiring IOReq=0x%p Command=%ld ED=0x%p, Frame=%ld\n",
                hc, ioreq, ioreq->iouh_Req.io_Command, ed,
                READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));

            if (ed->ed_Continue)
            {
                // reinitialise physical links in ED and its TD list
                td = (struct TDNode *)ed->ed_TDList.mlh_Head;
                ed->ed_ED.HeadPtr = td->td_Self;
                while (td->td_Node.mln_Succ != NULL)
                {
                    nexttd = (struct TDNode *)td->td_Node.mln_Succ;
                    if (nexttd != (struct TDNode *)&ed->ed_TDList.mlh_Tail)
                        td->td_TD.NextTD = nexttd->td_Self;
                    else
                        td->td_TD.NextTD = hc->hc_TermTD->td_Self;
                    td = nexttd;
                }

                // Refill ED with next data block
                FillED(hc, ed, BULK_XFER, ioreq, ioreq->iouh_Dir);
                PrintED("Continued bulk", ed, hc);

                Disable();
                AddTail(&hc->hc_TDQueue, (struct Node *)ioreq);
                oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
                oldenables |= OCSF_BULKENABLE;
                WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, oldenables);
                SYNC;
                Enable();
            }
            else
            {
                // disable ED
                if (ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
                {
                    updatetree = TRUE;
                    Remove((struct Node *)ed);
                }
                else
                    DisableED(ed);
                PrintED("Completed", ed, hc);

                target =
                    (ioreq->iouh_DevAddr << 5) + ioreq->iouh_Endpoint +
                    ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
                unit->hu_DevBusyReq[target] = NULL;
                unit->hu_DevDataToggle[target] =
                    (READMEM32_LE(&ed->ed_ED.HeadPtr) & OEHF_DATA1)
                    ? TRUE : FALSE;
                FreeEDContext(hc, ed, ioreq);

                // check for successful clear feature and set address ctrl
                // transfers
                if ((!ioreq->iouh_Req.io_Error)
                    && (ioreq->iouh_Req.io_Command == UHCMD_CONTROLXFER))
                {
                    CheckSpecialCtrlTransfers(hc, ioreq);
                }
                ReplyMsg(&ioreq->iouh_Req.io_Message);
            }
        }
        else
        {
            KPRINTF(20, ("IOReq=%p has no ED!\n", ioreq));
        }
        ioreq = nextioreq;
    }
    if (updatetree)
    {
        UpdateIntTree(hc);
    }
}
/* \\\ */
/* /// "HandleAbortedEDs()" */
static ULONG HandleAbortedEDs(struct PCIController *hc)
{
    struct IOUsbHWReq *ioreq;
    ULONG restartmask = 0;
    UWORD target;
    struct EDNode *ed;
    struct PCIUnit *unit = hc->hc_Unit;

    KPRINTF(50, ("Processing abort queue...\n"));

    // We don't need this any more
    DisableInt(hc, OISF_SOF);

    /*
     * If the aborted IORequest was replied in HandleFinishedTDs(),
     * it was already Remove()d from this queue. It's safe to do no checks.
     * io_Error was set earlier.
     */
    while ((ioreq = (struct IOUsbHWReq *)RemHead(&hc->hc_AbortQueue)))
    {
        KPRINTF(70, ("HC 0x%p Aborted IOReq 0x%p\n", hc, ioreq));
        PrintED("Aborted", ioreq->iouh_DriverPrivate1, hc);

        ed = ioreq->iouh_DriverPrivate1;
        target =
            (ioreq->iouh_DevAddr << 5) + ioreq->iouh_Endpoint +
            ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
        unit->hu_DevBusyReq[target] = NULL;
        unit->hu_DevDataToggle[target] =
            (READMEM32_LE(&ed->ed_ED.HeadPtr) & OEHF_DATA1) ? TRUE : FALSE;
        FreeEDContext(hc, ed, ioreq);
        ReplyMsg(&ioreq->iouh_Req.io_Message);
    }

    /* Restart stopped queues */
    if (hc->hc_Flags & HCF_STOP_CTRL)
    {
        KPRINTF(50, ("Restarting control transfers\n"));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CTRL_ED, 0);
        restartmask |= OCSF_CTRLENABLE;
    }

    if (hc->hc_Flags & HCF_STOP_BULK)
    {
        KPRINTF(50, ("Restarting bulk transfers\n"));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_ED, 0);
        restartmask |= OCSF_BULKENABLE;
    }

    /* Everything is enabled again, aborting done */
    hc->hc_Flags &= ~(HCF_STOP_CTRL | HCF_STOP_BULK | HCF_ABORT);

    /* We will accumulate flags and start queues only once, when everything
     * is set up */
    return restartmask;
}
/* \\\ */
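
/*
 * This is the second half of the abort handshake: AbortRequest() (further
 * below) stops the affected queue, unlinks the ED and requests an SOF
 * interrupt; by the time the SOF arrives and HCF_ABORT is set, the HC is
 * guaranteed to hold no more pointers into the aborted EDs, so they can
 * be freed and their IORequests replied here.
 */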
/* /// "FillED()" */
static ULONG FillED(struct PCIController *hc, struct EDNode *ed,
    UWORD xfer_type, struct IOUsbHWReq *ioreq, UWORD dir)
{
    BOOL success = TRUE, is_new_td;
    struct TDNode *td;
    ULONG actual;
    ULONG ctrl;
    ULONG len;
    ULONG phyaddr;
    ULONG dma_size;

    if (xfer_type == CTRL_XFER)
    {
        // construct set-up TD
        td = AllocTD(hc);
        if (td != NULL)
        {
            // fill setup td
            td->td_Length = 0;  // don't increase io_Actual for that transfer
            CONSTWRITEMEM32_LE(&td->td_TD.Ctrl,
                OTCF_CC_INVALID | OTCF_TOGGLEFROMTD | OTCF_NOINT |
                OTCF_PIDCODE_SETUP | OTCF_ALLOWSHORTPKT);
            len = 8;

            ed->ed_SetupData =
                usbGetBuffer(&ioreq->iouh_SetupData, len, UHDIR_OUT);
            phyaddr = (ULONG) (IPTR) CachePreDMA(ed->ed_SetupData, &len,
                DMA_ReadFromRAM);
            WRITEMEM32_LE(&td->td_TD.BufferPtr, phyaddr);
            WRITEMEM32_LE(&td->td_TD.BufferEnd, phyaddr + len - 1);

            KPRINTF(1, ("TD send: %08lx - %08lx\n",
                READMEM32_LE(&td->td_TD.BufferPtr),
                READMEM32_LE(&td->td_TD.BufferEnd)));

            AddTailTD(ed, td);
            AddTail((struct List *)&ed->ed_TDList, (struct Node *)td);
        }
        else
            success = FALSE;
    }

    if (success)
    {
        // put data into a series of TDs
        actual = ioreq->iouh_Actual;
        ctrl = OTCF_CC_INVALID | OTCF_NOINT |
            (dir == UHDIR_IN ? OTCF_PIDCODE_IN : OTCF_PIDCODE_OUT);
        if (xfer_type == CTRL_XFER)
            ctrl |= OTCF_TOGGLEFROMTD | OTCF_DATA1;
        if (xfer_type == CTRL_XFER
            || !(ioreq->iouh_Flags & UHFF_NOSHORTPKT))
            ctrl |= OTCF_ALLOWSHORTPKT;

        ed->ed_Buffer =
            usbGetBuffer(ioreq->iouh_Data, ioreq->iouh_Length, dir);
        if (ed->ed_Buffer == NULL && ioreq->iouh_Data != NULL)
            success = FALSE;
        if (xfer_type == BULK_XFER)
            td = (struct TDNode *)ed->ed_TDList.mlh_Head;
        else
            td = (struct TDNode *)&ed->ed_TDList.mlh_Tail;

        while (success && actual < ioreq->iouh_Length
            && (actual - ioreq->iouh_Actual < OHCI_TD_BULK_LIMIT
            || xfer_type != BULK_XFER))
        {
            // reuse the next old TD or get a new one
            if (td == (struct TDNode *)&ed->ed_TDList.mlh_Tail)
            {
                td = AllocTD(hc);
                if (td == NULL)
                    success = FALSE;
                is_new_td = TRUE;
            }
            else
                is_new_td = FALSE;

            if (success)
            {
                len = ioreq->iouh_Length - actual;
                if (len > OHCI_PAGE_SIZE)
                {
                    len = OHCI_PAGE_SIZE;
                }
                td->td_Length = len;
                KPRINTF(1, ("TD with %ld bytes. Status=%lx\n", len, ctrl));
                WRITEMEM32_LE(&td->td_TD.Ctrl, ctrl);
                phyaddr = (ULONG) (IPTR) CachePreDMA(ed->ed_Buffer + actual,
                    &len, dir == UHDIR_IN ? 0 : DMA_ReadFromRAM);
                WRITEMEM32_LE(&td->td_TD.BufferPtr, phyaddr);
                phyaddr += len - 1;
                WRITEMEM32_LE(&td->td_TD.BufferEnd, phyaddr);

                KPRINTF(1, ("TD send: %08lx - %08lx\n",
                    READMEM32_LE(&td->td_TD.BufferPtr),
                    READMEM32_LE(&td->td_TD.BufferEnd)));

                actual += len;

                if (is_new_td)
                {
                    AddTailTD(ed, td);
                    AddTail((struct List *)&ed->ed_TDList,
                        (struct Node *)td);
                }
                else
                {
                    dma_size = sizeof(struct TransferDescriptor);
                    CachePreDMA(&td->td_TD, &dma_size, 0);
                }

                td = (struct TDNode *)td->td_Node.mln_Succ;
            }
        }

        // construct control-status TD or empty-bulk TD
        if (success)
        {
            if (xfer_type == CTRL_XFER || (xfer_type == BULK_XFER
                && dir == UHDIR_OUT && actual == ioreq->iouh_Length
                && (!(ioreq->iouh_Flags & UHFF_NOSHORTPKT))
                && actual % ioreq->iouh_MaxPktSize == 0))
            {
                if (td == (struct TDNode *)&ed->ed_TDList.mlh_Tail)
                {
                    td = AllocTD(hc);
                    if (td == NULL)
                        success = FALSE;
                    is_new_td = TRUE;
                }
                else
                    is_new_td = FALSE;

                if (success)
                {
                    if (xfer_type == CTRL_XFER)
                    {
                        ctrl ^=
                            OTCF_NOINT | OTCF_PIDCODE_IN | OTCF_PIDCODE_OUT |
                            OTCF_ALLOWSHORTPKT;
                        ctrl |= OTCF_TOGGLEFROMTD | OTCF_DATA1;
                    }
                    else
                        ctrl ^= OTCF_NOINT;

                    td->td_Length = 0;
                    CONSTWRITEMEM32_LE(&td->td_TD.Ctrl, ctrl);
                    CONSTWRITEMEM32_LE(&td->td_TD.BufferPtr, 0);
                    CONSTWRITEMEM32_LE(&td->td_TD.BufferEnd, 0);

                    if (is_new_td)
                    {
                        AddTailTD(ed, td);
                        AddTail((struct List *)&ed->ed_TDList,
                            (struct Node *)td);
                    }
                    else
                    {
                        td->td_TD.NextTD = hc->hc_TermTD->td_Self;
                        dma_size = sizeof(struct TransferDescriptor);
                        CachePreDMA(&td->td_TD, &dma_size, 0);
                    }
                }
            }
            else
            {
                if (xfer_type == BULK_XFER)
                    ed->ed_Continue = (actual < ioreq->iouh_Length);
                td = (struct TDNode *)td->td_Node.mln_Pred;
                td->td_TD.NextTD = hc->hc_TermTD->td_Self;
                CONSTWRITEMEM32_LE(&td->td_TD.Ctrl, OTCF_CC_INVALID);
                dma_size = sizeof(struct TransferDescriptor);
                CachePreDMA(&td->td_TD, &dma_size, 0);
            }
        }
    }

    if (!success)
    {
        FreeEDContext(hc, ed, ioreq);
    }

    return success;
}
/* \\\ */
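
/*
 * A rough sketch of the TD budget in FillED() above: each data TD maps at
 * most OHCI_PAGE_SIZE bytes of the (contiguous) transfer buffer, and a
 * bulk request is cut off after OHCI_TD_BULK_LIMIT bytes per round;
 * ed_Continue then makes HandleFinishedTDs() refill the same ED with the
 * remainder. Control transfers bracket the data stage with a setup TD
 * (TD-supplied DATA0 toggle) and a status TD in the opposite direction
 * (DATA1, with the no-interrupt bit cleared so completion raises an IRQ).
 */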
/* /// "ScheduleED()" */
static ULONG ScheduleED(struct PCIController *hc, UWORD xfer_type,
    struct IOUsbHWReq *ioreq)
{
    BOOL success = TRUE;
    struct PCIUnit *unit = hc->hc_Unit;
    UWORD target;
    UWORD dir, list_no, list_index, interval;
    struct EDNode *ed;
    struct EDNode *pred_ed;
    ULONG epcaps, dma_size, phy_addr;

    ed = AllocED(hc);
    if (ed == NULL)
        success = FALSE;

    if (success)
    {
        ed->ed_IOReq = ioreq;

        if (xfer_type == CTRL_XFER)
            dir = (ioreq->iouh_SetupData.bmRequestType & URTF_IN)
                ? UHDIR_IN : UHDIR_OUT;
        else
            dir = ioreq->iouh_Dir;

        target = (ioreq->iouh_DevAddr << 5) + ioreq->iouh_Endpoint;
        if (xfer_type != CTRL_XFER && dir == UHDIR_IN)
            target |= 0x10;

        epcaps =
            (ioreq->iouh_DevAddr << OECS_DEVADDR) |
            (ioreq->iouh_Endpoint << OECS_ENDPOINT) |
            (ioreq->iouh_MaxPktSize << OECS_MAXPKTLEN);
        if (xfer_type == CTRL_XFER)
            epcaps |= OECF_DIRECTION_TD;
        else
            epcaps |=
                dir == UHDIR_IN ? OECF_DIRECTION_IN : OECF_DIRECTION_OUT;

        if (ioreq->iouh_Flags & UHFF_LOWSPEED)
        {
            KPRINTF(5, ("*** LOW SPEED ***\n"));
            epcaps |= OECF_LOWSPEED;
        }

        WRITEMEM32_LE(&ed->ed_ED.EPCaps, epcaps);

        if (xfer_type != CTRL_XFER && unit->hu_DevDataToggle[target])
            WRITEMEM32_LE(&ed->ed_ED.HeadPtr, OEHF_DATA1);

        if (!FillED(hc, ed, xfer_type, ioreq, dir))
        {
            // FillED() has already freed the ED context on failure
            success = FALSE;
            ed = NULL;
        }
    }

    if (success)
    {
        Remove(&ioreq->iouh_Req.io_Message.mn_Node);
        ioreq->iouh_DriverPrivate1 = ed;

        // choose logical list to add ED to
        list_index = 0;
        if (xfer_type == INT_XFER)
        {
            interval = ioreq->iouh_Interval;
            if (interval < 32)
            {
                while (interval > 1)
                {
                    interval >>= 1;
                    list_index++;
                }
            }
            else
                list_index = INT_LIST_COUNT - 1;
        }
        list_no = xfer_type + list_index;

        // manage endpoint going busy
        Disable();
        unit->hu_DevBusyReq[target] = ioreq;
        unit->hu_NakTimeoutFrame[target] =
            (ioreq->iouh_Flags & UHFF_NAKTIMEOUT) ? hc->hc_FrameCounter +
            ioreq->iouh_NakTimeout : 0;

        AddTail(&hc->hc_TDQueue, (struct Node *)ioreq);

        // looks good to me, now enqueue this entry
        AddTail((struct List *)&hc->hc_EDLists[list_no], (struct Node *)ed);

        if (xfer_type == INT_XFER)
        {
            UpdateIntTree(hc);
        }
        else
        {
            ed->ed_ED.NextED = 0L;
            dma_size = sizeof(struct EndpointDescriptor);
            phy_addr = (ULONG) (IPTR) CachePreDMA(&ed->ed_ED, &dma_size, 0);

            pred_ed = (struct EDNode *)ed->ed_Node.mln_Pred;
            if (pred_ed->ed_Node.mln_Pred != NULL)
            {
                pred_ed->ed_ED.NextED = phy_addr;
                dma_size = sizeof(struct EndpointDescriptor);
                CachePreDMA(&pred_ed->ed_ED, &dma_size, 0);
            }
            else
                WRITEREG32_LE(hc->hc_RegBase, (xfer_type == CTRL_XFER) ?
                    OHCI_CTRL_HEAD_ED : OHCI_BULK_HEAD_ED, ed->ed_Self);
        }

        SYNC;

        PrintED(xfer_names[xfer_type], ed, hc);

        Enable();
    }

    if (!success && ed != NULL)
    {
        FreeEDContext(hc, ed, ioreq);
    }

    return success;
}
/* \\\ */
/* /// "ScheduleXfers()" */
static ULONG ScheduleXfers(struct PCIController *hc, UWORD xfer_type)
{
    BOOL success = TRUE;
    struct PCIUnit *unit = hc->hc_Unit;
    struct IOUsbHWReq *ioreq;
    UWORD target;
    UWORD dir;
    ULONG oldenables;
    ULONG startmask = 0;

    KPRINTF(1, ("Scheduling new %s transfers...\n", xfer_names[xfer_type]));
    ioreq = (struct IOUsbHWReq *)hc->hc_XferQueues[xfer_type].lh_Head;
    while (success && ((struct Node *)ioreq)->ln_Succ)
    {
        if (xfer_type == CTRL_XFER)
            dir = (ioreq->iouh_SetupData.bmRequestType & URTF_IN)
                ? UHDIR_IN : UHDIR_OUT;
        else
            dir = ioreq->iouh_Dir;

        target = (ioreq->iouh_DevAddr << 5) + ioreq->iouh_Endpoint;
        if (xfer_type != CTRL_XFER && dir == UHDIR_IN)
            target |= 0x10;
        KPRINTF(10, ("New %s transfer to %ld.%ld: %ld bytes\n",
            xfer_names[xfer_type], ioreq->iouh_DevAddr,
            ioreq->iouh_Endpoint, ioreq->iouh_Length));
        /* is endpoint already in use or do we have to wait for next
         * transaction */
        if (unit->hu_DevBusyReq[target])
        {
            KPRINTF(5, ("Endpoint %02lx in use!\n", target));
            ioreq = (struct IOUsbHWReq *)((struct Node *)ioreq)->ln_Succ;
            continue;
        }

        success = ScheduleED(hc, xfer_type, ioreq);

        ioreq = (struct IOUsbHWReq *)hc->hc_XferQueues[xfer_type].lh_Head;
    }

    if (success)
    {
        /*
         * If we are going to start the queue but it's not running yet,
         * reset current ED pointer to zero. This will cause the HC to
         * start over from the head.
         */
        startmask = start_masks[xfer_type];
        oldenables = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
        if (!(oldenables & startmask))
        {
            CONSTWRITEREG32_LE(hc->hc_RegBase, current_ed_regs[xfer_type],
                0);
        }
    }

    return startmask;
}
/* \\\ */
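
/*
 * ScheduleXfers() only returns the OCSF_* bits that still need to be set;
 * actually kicking the queues is left to the caller (see the long comment
 * in CompleteInt() below for why the restart must happen exactly once,
 * after all scheduling is done).
 */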
/* /// "UpdateFrameCounter()" */
void UpdateFrameCounter(struct PCIController *hc)
{
    Disable();
    hc->hc_FrameCounter =
        (hc->hc_FrameCounter & 0xffff0000) | (READREG32_LE(hc->hc_RegBase,
        OHCI_FRAMECOUNT) & 0xffff);
    Enable();
}
/* \\\ */
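
/*
 * The hardware frame number is only 16 bits wide, so the driver extends
 * it to 32 bits in software: UpdateFrameCounter() above refreshes the low
 * 16 bits, and the OISF_FRAMECOUNTOVER handling in IntCode() advances the
 * upper bits on rollover.
 */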
/* /// "CompleteInt()" */
static AROS_INTH1(CompleteInt, struct PCIController *, hc)
{
    AROS_INTFUNC_INIT

    ULONG restartmask = 0;

    KPRINTF(1, ("CompleteInt!\n"));

    UpdateFrameCounter(hc);

    /* **************** PROCESS DONE TRANSFERS **************** */

    WRITEREG32_LE(hc->hc_RegBase, OHCI_INTDIS, OISF_DONEHEAD);
    if (hc->hc_DoneQueue)
        HandleFinishedTDs(hc);

    if (hc->hc_Flags & HCF_ABORT)
        restartmask = HandleAbortedEDs(hc);
    WRITEREG32_LE(hc->hc_RegBase, OHCI_INTEN, OISF_DONEHEAD);

    if ((!(hc->hc_Flags & HCF_STOP_CTRL))
        && hc->hc_XferQueues[CTRL_XFER].lh_Head->ln_Succ)
        restartmask |= ScheduleXfers(hc, CTRL_XFER);

    if (hc->hc_XferQueues[INT_XFER].lh_Head->ln_Succ)
        ScheduleXfers(hc, INT_XFER);

    if ((!(hc->hc_Flags & HCF_STOP_BULK))
        && hc->hc_XferQueues[BULK_XFER].lh_Head->ln_Succ)
        restartmask |= ScheduleXfers(hc, BULK_XFER);

    /*
     * Restart queues. In restartmask we have accumulated which queues need
     * to be started.
     *
     * We do it here only once, after everything is set up, because
     * otherwise the HC goes nuts in some cases. For example, the following
     * situation caused a TD queue loop: we are simultaneously scheduling
     * two control EDs and one of them completes with an error. If we
     * attempt to start the queue right after an ED is scheduled (this is
     * how the code originally worked), it looks like the HC manages to
     * deal with the first ED right before the second one is scheduled. At
     * this moment the first TD is HALTed with HeadPtr pointing to the
     * failed TD, which went to the DoneQueue (which will be picked up only
     * on the next ISR round; we are still in ScheduleCtrlEDs()). The
     * second ED is scheduled (the first one is not removed yet!) and we
     * re-trigger the control queue to start. This causes the erroneous TD
     * to reappear on the DoneQueue, effectively looping it. The DoneQueue
     * loop causes HandleFinishedTDs() to never exit. Restarting queues
     * here in this manner actually fixed the problem.
     */
    if (restartmask)
    {
        restartmask |= READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
        WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, restartmask);
        SYNC;
    }

    KPRINTF(1, ("CompleteDone\n"));

    return 0;

    AROS_INTFUNC_EXIT
}
/* \\\ */
/* /// "IntCode()" */
static AROS_INTH1(IntCode, struct PCIController *, hc)
{
    AROS_INTFUNC_INIT

    struct PCIUnit *unit = hc->hc_Unit;
    ULONG intr = 0;
    ULONG donehead;
    ULONG dma_size;

    dma_size = sizeof(struct HCCA);
    CachePostDMA(hc->hc_HCCA, &dma_size, 0);

    donehead = READMEM32_LE(&hc->hc_HCCA->ha_DoneHead);

    if (donehead)
    {
        if (donehead & ~1)
            intr = OISF_DONEHEAD;
        if (donehead & 1)
        {
            intr |= READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS);
        }
        donehead &= OHCI_PTRMASK;

        CONSTWRITEMEM32_LE(&hc->hc_HCCA->ha_DoneHead, 0);
    }
    else
    {
        intr = READREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS);

        if (intr & OISF_DONEHEAD)
        {
            KPRINTF(1, ("DONEHEAD WAS EMPTY!\n"));
            donehead =
                READMEM32_LE(&hc->hc_HCCA->ha_DoneHead) & OHCI_PTRMASK;
            CONSTWRITEMEM32_LE(&hc->hc_HCCA->ha_DoneHead, 0);

            KPRINTF(500, ("New Donehead %08lx for old %08lx\n", donehead,
                hc->hc_DoneQueue));
        }
    }
    dma_size = sizeof(struct HCCA);
    CachePreDMA(hc->hc_HCCA, &dma_size, 0);

    intr &= ~OISF_MASTERENABLE;

    if (intr & hc->hc_PCIIntEnMask)
    {
        KPRINTF(1, ("IntCode(0x%p) interrupts 0x%08lx, mask 0x%08lx\n", hc,
            intr, hc->hc_PCIIntEnMask));

        if (intr & OISF_HOSTERROR)
        {
            KPRINTF(200, ("Host ERROR!\n"));
        }
        if (intr & OISF_SCHEDOVERRUN)
        {
            KPRINTF(200, ("Schedule overrun!\n"));
        }
        if (!(hc->hc_Flags & HCF_ONLINE))
        {
            if (READREG32_LE(hc->hc_RegBase,
                OHCI_INTSTATUS) & OISF_HUBCHANGE)
            {
                // if the driver is not online and the controller has a
                // broken hub change interrupt, make sure we don't run into
                // an infinite interrupt by disabling the interrupt bit
                DisableInt(hc, OISF_HUBCHANGE);
            }
            return FALSE;
        }
        WRITEREG32_LE(hc->hc_RegBase, OHCI_INTEN, OISF_HUBCHANGE);
        if (intr & OISF_FRAMECOUNTOVER)
        {
            hc->hc_FrameCounter |= 0x7fff;
            hc->hc_FrameCounter++;
            hc->hc_FrameCounter |=
                READMEM16_LE(&hc->hc_HCCA->ha_FrameCount);
            KPRINTF(10, ("HCI 0x%p: Frame Counter Rollover %ld\n", hc,
                hc->hc_FrameCounter));
        }
        if (intr & OISF_HUBCHANGE)
        {
            UWORD hciport;
            ULONG oldval;
            UWORD portreg = OHCI_PORTSTATUS;
            BOOL clearbits = FALSE;

            if (READREG32_LE(hc->hc_RegBase,
                OHCI_INTSTATUS) & OISF_HUBCHANGE)
            {
                /* Some OHCI implementations will keep the interrupt bit
                 * stuck until all port changes have been cleared, which is
                 * wrong according to the OHCI spec. As a workaround we will
                 * clear all change bits, which should be no problem as the
                 * port changes are reflected in the PortChangeMap array.
                 */
                clearbits = TRUE;
            }
            for (hciport = 0; hciport < hc->hc_NumPorts;
                hciport++, portreg += 4)
            {
                oldval = READREG32_LE(hc->hc_RegBase, portreg);
                hc->hc_PortChangeMap[hciport] |= TranslatePortFlags(oldval,
                    OHPF_OVERCURRENTCHG | OHPF_RESETCHANGE |
                    OHPF_ENABLECHANGE | OHPF_CONNECTCHANGE |
                    OHPF_RESUMEDTX);
                if (clearbits)
                {
                    WRITEREG32_LE(hc->hc_RegBase, portreg,
                        OHPF_CONNECTCHANGE | OHPF_ENABLECHANGE |
                        OHPF_RESUMEDTX | OHPF_OVERCURRENTCHG |
                        OHPF_RESETCHANGE);
                }

                KPRINTF(20, ("PCI Int Port %ld (glob %ld) Change %08lx\n",
                    hciport, hc->hc_PortNum20[hciport] + 1, oldval));
                if (hc->hc_PortChangeMap[hciport])
                {
                    unit->hu_RootPortChanges |=
                        1UL << (hc->hc_PortNum20[hciport] + 1);
                }
            }
            CheckRootHubChanges(unit);
            if (clearbits)
            {
                // again try to get rid of any bits that may be causing the
                // interrupt
                WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBSTATUS,
                    OHSF_OVERCURRENTCHG);
                WRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS,
                    OISF_HUBCHANGE);
            }
        }
        if (intr & OISF_DONEHEAD)
        {
            KPRINTF(10, ("DoneHead Frame=%ld\n",
                READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));

            if (hc->hc_DoneQueue)
            {
                struct TDNode *donetd =
                    (struct TDNode *)((IPTR) donehead -
                    hc->hc_PCIVirtualAdjust - offsetof(struct TDNode,
                    td_TD.Ctrl));

                CacheClearE(&donetd->td_TD, 16, CACRF_InvalidateD);
                while (donetd->td_TD.NextTD)
                {
                    donetd =
                        (struct TDNode *)((IPTR) donetd->td_TD.NextTD -
                        hc->hc_PCIVirtualAdjust - offsetof(struct TDNode,
                        td_TD.Ctrl));
                    CacheClearE(&donetd->td_TD, 16, CACRF_InvalidateD);
                }
                WRITEMEM32_LE(&donetd->td_TD.NextTD, hc->hc_DoneQueue);
                CacheClearE(&donetd->td_TD, 16, CACRF_ClearD);

                KPRINTF(10,
                    ("Attached old DoneHead 0x%08lx to TD 0x%08lx\n",
                    hc->hc_DoneQueue, donetd->td_Self));
            }
            hc->hc_DoneQueue = donehead;
        }
        if (intr & OISF_SOF)
        {
            /* Aborted EDs are available for freeing */
            hc->hc_Flags |= HCF_ABORT;
        }

        if (intr & (OISF_SOF | OISF_DONEHEAD))
        {
            /*
             * These two are leveraged down to SoftInt.
             * This is done in order to keep queue rotation synchronized.
             */
            Cause(&hc->hc_CompleteInt);
        }

        KPRINTF(1, ("Exiting IntCode(0x%p)\n", unit));
    }

    WRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, intr);

    /* Unlock interrupts */
    WRITEREG32_LE(hc->hc_RegBase, OHCI_INTEN, OISF_MASTERENABLE);

    return FALSE;

    AROS_INTFUNC_EXIT
}
/* \\\ */
/* /// "AbortRequest()" */
void AbortRequest(struct PCIController *hc, struct IOUsbHWReq *ioreq)
{
    struct PCIUnit *unit = hc->hc_Unit;
    struct EDNode *ed = ioreq->iouh_DriverPrivate1;
    UWORD target =
        (ioreq->iouh_DevAddr << 5) + ioreq->iouh_Endpoint +
        ((ioreq->iouh_Dir == UHDIR_IN) ? 0x10 : 0);
    ULONG disablemask = 0;
    ULONG ctrlstatus;

    KPRINTF(70, ("HC 0x%p Aborting request 0x%p, command %ld, "
        "endpoint 0x%04lx, Frame=%ld\n",
        hc, ioreq, ioreq->iouh_Req.io_Command, target,
        READREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT)));
    PrintED("Aborting", ed, hc);

    /* Removing control and bulk EDs requires stopping the appropriate HC
     * queue first (according to the specification) */
    switch (ioreq->iouh_Req.io_Command)
    {
    case UHCMD_CONTROLXFER:
        KPRINTF(50, ("Stopping control queue\n"));
        hc->hc_Flags |= HCF_STOP_CTRL;
        disablemask = OCSF_CTRLENABLE;
        break;

    case UHCMD_BULKXFER:
        KPRINTF(50, ("Stopping bulk queue\n"));
        hc->hc_Flags |= HCF_STOP_BULK;
        disablemask = OCSF_BULKENABLE;
        break;
    }

    /* Stop selected queue(s) */
    if (disablemask)
    {
        ctrlstatus = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
        ctrlstatus &= ~disablemask;
        WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, ctrlstatus);
        SYNC;
    }

    // disable ED
    DisableED(ed);

    /*
     * ...and move to abort queue.
     * We can't reply the request right now because some of its TDs
     * may be in use by the HC at this very moment, meaning it may still
     * touch the data buffer referred to by the request.
     * We reply the request only when the HC stops doing this. Otherwise
     * we may end up in trashed memory.
     */
    Remove(&ioreq->iouh_Req.io_Message.mn_Node);
    AddTail(&hc->hc_AbortQueue, &ioreq->iouh_Req.io_Message.mn_Node);

    if (ioreq->iouh_Req.io_Command == UHCMD_INTXFER)
        UpdateIntTree(hc);

    unit->hu_DevDataToggle[target] =
        (READMEM32_LE(&ed->ed_ED.HeadPtr) & OEHF_DATA1) ? TRUE : FALSE;

    /*
     * Request a StartOfFrame interrupt. Upon the next frame this ED
     * is guaranteed to be out of use and can be freed.
     */
    EnableInt(hc, OISF_SOF);
}
/* \\\ */
/* /// "InitController()" */
BOOL InitController(struct PCIController *hc, struct PCIUnit *hu)
{
    struct PCIDevice *hd = hu->hu_Device;
    struct EDNode *ed;
    struct TDNode *td;
    ULONG *tabptr;
    UBYTE *memptr;
    ULONG hubdesca;
    ULONG cmdstatus;
    ULONG control;
    ULONG timeout;
    ULONG frameival;
    UWORD i;
    ULONG cnt;
    ULONG dma_size;
    ULONG phy_addr;

    struct TagItem pciActivateMem[] = {
        {aHidd_PCIDevice_isMEM, TRUE},
        {TAG_DONE, 0UL},
    };

    struct TagItem pciActivateBusmaster[] = {
        {aHidd_PCIDevice_isMaster, TRUE},
        {TAG_DONE, 0UL},
    };

    struct TagItem pciDeactivateBusmaster[] = {
        {aHidd_PCIDevice_isMaster, FALSE},
        {TAG_DONE, 0UL},
    };

    hc->hc_CompleteInt.is_Node.ln_Type = NT_INTERRUPT;
    hc->hc_CompleteInt.is_Node.ln_Name = "OHCI CompleteInt";
    hc->hc_CompleteInt.is_Node.ln_Pri = 0;
    hc->hc_CompleteInt.is_Data = hc;
    hc->hc_CompleteInt.is_Code = (VOID_FUNC) CompleteInt;

    hc->hc_PCIMemSize = OHCI_HCCA_SIZE + OHCI_HCCA_ALIGNMENT + 1;
    hc->hc_PCIMemSize += sizeof(struct EDNode) * OHCI_ED_POOLSIZE;
    hc->hc_PCIMemSize += sizeof(struct TDNode) * OHCI_TD_POOLSIZE;

    memptr =
        HIDD_PCIDriver_AllocPCIMem(hc->hc_PCIDriverObject,
        hc->hc_PCIMemSize);
    hc->hc_PCIMem = (APTR) memptr;
    if (memptr)
    {
        // PhysicalAddress - VirtualAdjust = VirtualAddress
        // VirtualAddress + VirtualAdjust = PhysicalAddress
        hc->hc_PCIVirtualAdjust =
            pciGetPhysical(hc, memptr) - (APTR) memptr;
        KPRINTF(10, ("VirtualAdjust 0x%08lx\n", hc->hc_PCIVirtualAdjust));

        // align memory
        memptr =
            (UBYTE *) (((IPTR) hc->hc_PCIMem +
            OHCI_HCCA_ALIGNMENT) & (~OHCI_HCCA_ALIGNMENT));
        hc->hc_HCCA = (struct HCCA *)memptr;
        KPRINTF(10, ("HCCA 0x%p\n", hc->hc_HCCA));
        memptr += OHCI_HCCA_SIZE;

        // build up ED pool
        NewList((struct List *)&hc->hc_FreeEDList);
        ed = (struct EDNode *)memptr;
        cnt = OHCI_ED_POOLSIZE;
        do
        {
            // minimal initialization
            AddTail((struct List *)&hc->hc_FreeEDList, (struct Node *)ed);
            NewList((struct List *)&ed->ed_TDList);
            WRITEMEM32_LE(&ed->ed_Self,
                (IPTR) (&ed->ed_ED.EPCaps) + hc->hc_PCIVirtualAdjust);
            ed++;
        }
        while (--cnt);
        memptr += sizeof(struct EDNode) * OHCI_ED_POOLSIZE;

        // build up TD pool
        NewList((struct List *)&hc->hc_FreeTDList);
        td = (struct TDNode *)memptr;
        cnt = OHCI_TD_POOLSIZE - 1;
        do
        {
            AddTail((struct List *)&hc->hc_FreeTDList, (struct Node *)td);
            WRITEMEM32_LE(&td->td_Self,
                (IPTR) (&td->td_TD.Ctrl) + hc->hc_PCIVirtualAdjust);
            td++;
        }
        while (--cnt);
        WRITEMEM32_LE(&td->td_Self,
            (IPTR) (&td->td_TD.Ctrl) + hc->hc_PCIVirtualAdjust);
        memptr += sizeof(struct TDNode) * OHCI_TD_POOLSIZE;

        // terminating TD
        hc->hc_TermTD = td = AllocTD(hc);
        td->td_Node.mln_Succ = NULL;
        td->td_Node.mln_Pred = NULL;
        td->td_TD.NextTD = 0;

        // terminating ED
        hc->hc_TermED = ed = AllocED(hc);
        ed->ed_Node.mln_Succ = NULL;
        ed->ed_Node.mln_Pred = NULL;
        CONSTWRITEMEM32_LE(&ed->ed_ED.EPCaps, OECF_SKIP);
        ed->ed_ED.NextED = 0L;

        for (cnt = 0; cnt < XFER_COUNT + INT_LIST_COUNT - 1; cnt++)
            NewList((struct List *)&hc->hc_EDLists[cnt]);

        UpdateIntTree(hc);

        // fill in framelist with IntED entry points based on interval
        tabptr = hc->hc_HCCA->ha_IntEDs;
        for (cnt = 0; cnt < 32; cnt++)
        {
            *tabptr++ = hc->hc_TermED->ed_Self;
        }

        // time to initialize hardware...
        OOP_GetAttr(hc->hc_PCIDeviceObject, aHidd_PCIDevice_Base0,
            (IPTR *) &hc->hc_RegBase);
        hc->hc_RegBase = (APTR) (((IPTR) hc->hc_RegBase) & (~0xf));
        KPRINTF(10, ("RegBase = 0x%p\n", hc->hc_RegBase));

        // enable memory
        OOP_SetAttrs(hc->hc_PCIDeviceObject, (struct TagItem *)pciActivateMem);

        hubdesca = READREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA);
        hc->hc_NumPorts = (hubdesca & OHAM_NUMPORTS) >> OHAS_NUMPORTS;
        KPRINTF(20, ("Found OHCI Controller %p FuncNum = %ld, Rev %02lx, "
            "with %ld ports\n",
            hc->hc_PCIDeviceObject, hc->hc_FunctionNum,
            READREG32_LE(hc->hc_RegBase, OHCI_REVISION) & 0xFF,
            hc->hc_NumPorts));

        KPRINTF(20, ("Powerswitching: %s %s\n",
            hubdesca & OHAF_NOPOWERSWITCH ? "Always on" : "Available",
            hubdesca & OHAF_INDIVIDUALPS ? "per port" : "global"));

        control = READREG32_LE(hc->hc_RegBase, OHCI_CONTROL);
        KPRINTF(10, ("OHCI control state: 0x%08lx\n", control));

        // disable BIOS legacy support
        if (control & OCLF_SMIINT)
        {
            KPRINTF(10,
                ("BIOS still has hands on OHCI, trying to get rid of it\n"));

            cmdstatus = READREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS);
            cmdstatus |= OCSF_OWNERCHANGEREQ;
            WRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, cmdstatus);
            timeout = 100;
            do
            {
                control = READREG32_LE(hc->hc_RegBase, OHCI_CONTROL);
                if (!(control & OCLF_SMIINT))
                {
                    KPRINTF(10, ("BIOS gave up on OHCI. Pwned!\n"));
                    break;
                }
                DelayMS(10, hu);
            }
            while (--timeout);
            if (!timeout)
            {
                KPRINTF(10,
                    ("BIOS didn't release OHCI. Forcing and praying...\n"));
                control &= ~OCLF_SMIINT;
                WRITEREG32_LE(hc->hc_RegBase, OHCI_CONTROL, control);
            }
        }

        OOP_SetAttrs(hc->hc_PCIDeviceObject,
            (struct TagItem *)pciDeactivateBusmaster);  // no busmaster yet

        KPRINTF(10, ("Resetting OHCI HC\n"));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, OCSF_HCRESET);
        cnt = 100;
        do
        {
            if (!(READREG32_LE(hc->hc_RegBase,
                OHCI_CMDSTATUS) & OCSF_HCRESET))
            {
                break;
            }
            DelayMS(1, hu);
        }
        while (--cnt);

#ifdef DEBUG
        if (cnt == 0)
        {
            KPRINTF(20, ("Reset Timeout!\n"));
        }
        else
        {
            KPRINTF(20, ("Reset finished after %ld ticks\n", 100 - cnt));
        }
#endif

        OOP_SetAttrs(hc->hc_PCIDeviceObject,
            (struct TagItem *)pciActivateBusmaster);    // enable busmaster

        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_FRAMECOUNT, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_PERIODICSTART, 10800);
        // 90% of 12000: the last 10% of each frame is reserved for
        // periodic transfers
        frameival = READREG32_LE(hc->hc_RegBase, OHCI_FRAMEINTERVAL);
        KPRINTF(10, ("FrameInterval=%08lx\n", frameival));
        frameival &= ~OIVM_BITSPERFRAME;
        frameival |= OHCI_DEF_BITSPERFRAME << OIVS_BITSPERFRAME;
        frameival |= OIVF_TOGGLE;
        WRITEREG32_LE(hc->hc_RegBase, OHCI_FRAMEINTERVAL, frameival);

        // make sure nothing is running
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_PERIODIC_ED, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CTRL_HEAD_ED, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CTRL_ED, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_HEAD_ED, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_BULK_ED, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_DONEHEAD, 0);

        dma_size = sizeof(struct HCCA);
        phy_addr =
            (ULONG) (IPTR) CachePreDMA(hc->hc_HCCA, &dma_size,
            DMA_ReadFromRAM);
        WRITEREG32_LE(hc->hc_RegBase, OHCI_HCCA, phy_addr);

        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, OISF_ALL_INTS);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_INTDIS, OISF_ALL_INTS);
        SYNC;

        // install reset handler
        hc->hc_ResetInt.is_Code = (VOID_FUNC) ResetHandler;
        hc->hc_ResetInt.is_Data = hc;
        AddResetCallback(&hc->hc_ResetInt);

        // add interrupt
        hc->hc_PCIIntHandler.is_Node.ln_Name =
            hu->hu_Device->hd_Library.lib_Node.ln_Name;
        hc->hc_PCIIntHandler.is_Node.ln_Pri = 5;
        hc->hc_PCIIntHandler.is_Code = (VOID_FUNC) IntCode;
        hc->hc_PCIIntHandler.is_Data = hc;
        AddIntServer(INTB_KERNEL + hc->hc_PCIIntLine,
            &hc->hc_PCIIntHandler);

        hc->hc_PCIIntEnMask =
            OISF_DONEHEAD | OISF_RESUMEDTX | OISF_HOSTERROR |
            OISF_FRAMECOUNTOVER | OISF_HUBCHANGE;

        WRITEREG32_LE(hc->hc_RegBase, OHCI_INTEN,
            hc->hc_PCIIntEnMask | OISF_MASTERENABLE);

        /* Reset controller twice (needed for some OHCI chips) */
        for (i = 0; i < 2; i++)
        {
            CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CONTROL,
                OCLF_PERIODICENABLE | OCLF_CTRLENABLE | OCLF_BULKENABLE |
                OCLF_ISOENABLE | OCLF_USBRESET);
            SYNC;
            KPRINTF(10, ("POST-RESET FrameInterval=%08lx\n",
                READREG32_LE(hc->hc_RegBase, OHCI_FRAMEINTERVAL)));
            WRITEREG32_LE(hc->hc_RegBase, OHCI_FRAMEINTERVAL, frameival);
        }

        // make sure the ports are on with chipset quirk workaround
        hubdesca = READREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA);
        hubdesca |= OHAF_NOPOWERSWITCH;
        hubdesca &= ~OHAF_INDIVIDUALPS;
        WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA, hubdesca);

        WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBSTATUS, OHSF_POWERHUB);

        DelayMS(50, hu);
        WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBDESCA, hubdesca);

        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CONTROL,
            OCLF_PERIODICENABLE | OCLF_CTRLENABLE | OCLF_BULKENABLE |
            OCLF_ISOENABLE | OCLF_USBOPER);
        SYNC;

        KPRINTF(20, ("Init returns TRUE...\n"));
        return TRUE;
    }

    KPRINTF(1000, ("Init returns FALSE...\n"));
    return FALSE;
}
/* \\\ */
/* /// "FreeController()" */
void FreeController(struct PCIController *hc, struct PCIUnit *hu)
{
    hc = (struct PCIController *)hu->hu_Controllers.lh_Head;
    while (hc->hc_Node.ln_Succ)
    {
        KPRINTF(20, ("Shutting down OHCI %p\n", hc));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_INTDIS, OISF_ALL_INTS);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_INTSTATUS, OISF_ALL_INTS);

        // disable all ports
        WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBDESCB, 0);
        WRITEREG32_LE(hc->hc_RegBase, OHCI_HUBSTATUS, OHSF_UNPOWERHUB);

        DelayMS(50, hu);
        KPRINTF(20, ("Stopping OHCI %p\n", hc));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CONTROL, 0);
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, 0);
        SYNC;

        //KPRINTF(20, ("Reset done OHCI %08lx\n", hc));
        DelayMS(10, hu);
        KPRINTF(20, ("Resetting OHCI %p\n", hc));
        CONSTWRITEREG32_LE(hc->hc_RegBase, OHCI_CMDSTATUS, OCSF_HCRESET);
        SYNC;
        DelayMS(50, hu);

        KPRINTF(20, ("Shutting down OHCI done.\n"));

        hc = (struct PCIController *)hc->hc_Node.ln_Succ;
    }
}
/* \\\ */
/* /// "TranslatePortFlags()" */
UWORD TranslatePortFlags(ULONG flags, ULONG mask)
{
    UWORD new_flags = 0;

    flags &= mask;

    if (flags & OHPF_PORTPOWER)
        new_flags |= UPSF_PORT_POWER;
    if (flags & OHPF_OVERCURRENT)
        new_flags |= UPSF_PORT_OVER_CURRENT;
    if (flags & OHPF_PORTCONNECTED)
        new_flags |= UPSF_PORT_CONNECTION;
    if (flags & OHPF_PORTENABLE)
        new_flags |= UPSF_PORT_ENABLE;
    if (flags & OHPF_LOWSPEED)
        new_flags |= UPSF_PORT_LOW_SPEED;
    if (flags & OHPF_PORTRESET)
        new_flags |= UPSF_PORT_RESET;
    if (flags & OHPF_PORTSUSPEND)
        new_flags |= UPSF_PORT_SUSPEND;
    if (flags & OHPF_OVERCURRENTCHG)
        new_flags |= UPSF_PORT_OVER_CURRENT;
    if (flags & OHPF_RESETCHANGE)
        new_flags |= UPSF_PORT_RESET;
    if (flags & OHPF_ENABLECHANGE)
        new_flags |= UPSF_PORT_ENABLE;
    if (flags & OHPF_CONNECTCHANGE)
        new_flags |= UPSF_PORT_CONNECTION;
    if (flags & OHPF_RESUMEDTX)
        new_flags |= UPSF_PORT_SUSPEND;

    return new_flags;
}
/* \\\ */