/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 *
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 */

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
#else
#define DBGMSG(fmt, args...) do {} while (0)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0444);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");

static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);

#ifndef __LITTLE_ENDIAN
static const size_t hdr_sizes[] = {
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* reserved */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
	/* rest is reserved or link-internal */
};

static inline void header_le32_to_cpu(quadlet_t *data, unsigned char tcode)
{
	size_t size;

	if (unlikely(tcode >= ARRAY_SIZE(hdr_sizes)))
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = le32_to_cpu(data[size]);
}
#else
#define header_le32_to_cpu(w,x) do {} while (0)
#endif /* !LITTLE_ENDIAN */

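/*
 * For reference: the controller DMAs packet headers as little-endian
 * quadlets, so big-endian hosts must swap each header quadlet before
 * the ieee1394 core looks at it.  hdr_sizes[] gives the number of
 * header quadlets per tcode, e.g. a quadlet write request
 * (TCODE_WRITEQ) carries 3 header quadlets while a block read
 * response (TCODE_READB_RESPONSE) carries 4.
 */
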
/***********************************
 * IEEE-1394 functionality section *
 ***********************************/

static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x80000000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave (&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT (KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		       r, r & 0x00004000, i);

	spin_unlock_irqrestore (&ohci->phy_reg_lock, flags);

	return;
}

/* Or's our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg (ohci, addr);
	old |= data;
	set_phy_reg (ohci, addr, old);

	return;
}

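/*
 * For reference, the PhyControl encoding used by the helpers above: a
 * read is requested by writing (addr << 8) | 0x00008000 (rdReg);
 * completion is signalled by rdDone (0x80000000) and the result sits
 * in bits 16-23, hence the (r & 0x00ff0000) >> 16 in get_phy_reg().
 * A write is (addr << 8) | data | 0x00004000 (wrReg), and the wrReg
 * bit clears once the data has reached the PHY.
 */
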
static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
				int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count=reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors<OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask (ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
			      "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG ("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG ("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}

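/*
 * Illustrative example (made-up values): a self-ID quadlet pair
 * arrives as q0 = 0x817f8cde with q1 = ~q0, so the q0 == ~q1 check
 * passes.  Bits 24-29 of q0 ((q0 & 0x3f000000) >> 24 == 1) carry the
 * sender's phy_id, which handle_selfid() compares against our own
 * phyid to recognize our self-ID packet.
 */
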
static void ohci_soft_reset(struct ti_ohci *ohci) {
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG ("Soft reset finished");
}

/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i=0; i<d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}

/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i,ctx=0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i=0; i<32; i++) {
		if (tmp & 1) ctx++;
		tmp >>= 1;
	}
	return ctx;
}

/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);

	/* Put some defaults to these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |= 0x60000000; /* Enable CMC and ISC */
	if (hpsb_disable_irm)
		buf &= ~0x80000000;
	else
		buf |= 0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	i = get_phy_reg(ohci, 4) | PHY_04_LCTRL;
	if (hpsb_disable_irm)
		i &= ~PHY_04_CONTENDER;
	else
		i |= PHY_04_CONTENDER;
	set_phy_reg(ohci, 4, i);

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1<<(((reg_read(ohci, OHCI1394_BusOptions)>>12)&0xf)+1);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Accept AR requests from all nodes */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Set the address range of the physical response unit.
	 * Most controllers do not implement it as a writable register though.
	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
	 * register content.
	 * To actually enable physical responses is the job of our interrupt
	 * handler which programs the physical request filter. */
	reg_write(ohci, OHCI1394_PhyUpperBound,
		  OHCI1394_PHYS_UPPER_BOUND_PROGRAMMED >> 16);

	DBGMSG("physUpperBoundOffset=%08x",
	       reg_read(ohci, OHCI1394_PhyUpperBound));

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_postedWriteErr |
		  OHCI1394_cycleTooLong |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%d] "
	      "MMIO=[%llx-%llx] Max Packet=[%d] IR/IT contexts=[%d/%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), ohci->dev->irq,
	      (unsigned long long)pci_resource_start(ohci->dev, 0),
	      (unsigned long long)pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size,
	      ohci->nb_iso_rcv_ctx, ohci->nb_iso_xmit_ctx);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
		      "attempting to set max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, " EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, " EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}

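/*
 * Worked example for the max_packet_size computation above: bits
 * 12-15 of BusOptions hold max_rec, and the driver uses
 * 1 << (max_rec + 1).  max_rec = 8 yields 512 bytes, max_rec = 9
 * yields 1024; anything outside 512..4096 trips the serial EEPROM
 * sanity check.
 */
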
/*
 * Insert a packet in the DMA fifo and generate the DMA prg
 * FIXME: rewrite the program in order to accept packets crossing
 *        page boundaries.
 *        check also that a single dma descriptor doesn't cross a
 *        page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer>>25)&0x7)+1)&0x7)<<13) |
			((cycleTimer&0x01fff000)>>12));

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ( (packet->type == hpsb_async) || (packet->type == hpsb_raw) ) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY<<4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA){
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size)>0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code<<16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		header_le32_to_cpu(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "    begin=%08x %08x %08x %08x\n"
		       "          %08x %08x %08x %08x\n"
		       "    end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}

/*
 * This function fills the FIFO with the (eventual) pending packets
 * and runs or wakes up the DMA prg if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing must have been done, either no free_prgs or no packets */
	if (z == 0)
		return;

	/* Is the context running ? (should be unless it is
	   the first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d",d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d",d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}

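/*
 * For reference, the Z value or'ed into CommandPtr above: the low
 * nibble tells the controller how many descriptors make up the first
 * program block.  insert_packet() builds two-descriptor programs for
 * header-only packets (z = 2) and three-descriptor programs when a
 * payload buffer is attached (z = 3), which is also why branch
 * addresses are or'ed with 0x2 or 0x3 there.
 */
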
/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock,flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock,flags);

	return 0;
}

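/*
 * Routing note for the checks above: in IEEE 1394 tcodes, bit 1 set
 * generally marks a response (e.g. TCODE_WRITE_RESPONSE = 0x2,
 * TCODE_READQ_RESPONSE = 0x6), so those go to the AT response
 * context.  TCODE_ISO_DATA (0xA) also has that bit set but is an
 * async stream packet, hence the explicit exclusion keeping it on the
 * request context.
 */
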
static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0, phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1<<30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}

/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode. The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor). Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block. But
  the user API expects all packets to be contiguous. Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/

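/*
 * Illustrative example of the wrap-around case (numbers made up): with
 * a 16-page buffer there are 15 DMA blocks plus the reserved guard
 * page.  If a 300-byte packet starts 100 bytes before the end of the
 * last block, its remaining 200 bytes land at the start of block 0;
 * the parser then copies those 200 bytes into the guard page so the
 * reader still sees one contiguous packet.
 */
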
struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), GFP_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size/PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						((recv->nblocks+1)/iso->buf_packets);
		if (recv->block_irq_interval*4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks/4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);

		if (recv->buf_stride*iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd*) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size/PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}

static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}

/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks-1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}

/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i-32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i-32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync&0xf)<<8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
		      "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}

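/*
 * For reference, the IR ContextMatch word assembled above:
 *   bits 28-31  tag mask
 *   bits 12-26  cycleMatch (13-bit cycle plus two low 'seconds' bits)
 *   bits  8-11  sync field
 *   bits  0-5   channel number (single-channel mode)
 * e.g. tag_mask = 0xf, channel 5, cycle = -1, sync = -1 yields
 * 0xf0000005.
 */
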
static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block */
	/* by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* ignore out-of-range requests */
	if ((block < 0) || (block > recv->nblocks))
		return;

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3<<20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}

static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	/* release the memory where the packet was */
	recv->released_bytes += info->total_len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}

static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}

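/*
 * For reference, the buffer-fill packet layout consumed by the parser
 * below (OHCI spec section 10.6.1.1, as cited in its comments): a
 * 4-byte ISO header (len in bytes 2-3, tag/channel in byte 1, sy in
 * byte 0), the payload padded to a quadlet boundary, then a 4-byte
 * trailer whose first two bytes hold the 13-bit cycle timestamp -
 * 8 bytes of overhead total, matching 'total_len = len + 8' below.
 */
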
/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle, total_len;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset/recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
			      "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		total_len = len + 8; /* 8 bytes header+trailer in OHCI packet */
		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len%4);
			total_len += 4 - (len%4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride*recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1]<<8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride*recv->nblocks) {
			recv->dma_offset -= recv->buf_stride*recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, total_len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
			      "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma*recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma+1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}

static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd*) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer\n");
		} else if (event) {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len,
					recv->buf_stride, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}

/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);

static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), GFP_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0) {
		ret = -EBUSY;
		goto err;
	}

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}

static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}

static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso*) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
			      "IT DMA error - OHCI error code 0x%02x\n", event);

		/* at least one packet went out, so wake up the writer */
		wake = 1;

		/* parse cycle */
		cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;

		/* tell the subsystem the packet has gone out */
		hpsb_iso_packet_sent(iso, cycle, event != 0x11);

		/* reset the DMA descriptor for next time */
		cmd->output_last.status = 0;
	}

	if (wake)
		hpsb_iso_wake(iso);
}

1870 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1872 struct ohci_iso_xmit *xmit = iso->hostdata;
1873 struct ti_ohci *ohci = xmit->ohci;
1875 int next_i, prev_i;
1876 struct iso_xmit_cmd *next, *prev;
1878 unsigned int offset;
1879 unsigned short len;
1880 unsigned char tag, sy;
1882 /* check that the packet doesn't cross a page boundary
1883 (we could allow this if we added OUTPUT_MORE descriptor support) */
1884 if (cross_bound(info->offset, info->len)) {
1885 PRINT(KERN_ERR,
1886 "rawiso xmit: packet %u crosses a page boundary",
1887 iso->first_packet);
1888 return -EINVAL;
1891 offset = info->offset;
1892 len = info->len;
1893 tag = info->tag;
1894 sy = info->sy;
1896 /* sync up the card's view of the buffer */
1897 dma_region_sync_for_device(&iso->data_buf, offset, len);
1899 /* append first_packet to the DMA chain */
1900 /* by linking the previous descriptor to it */
1901 /* (next will become the new end of the DMA chain) */
1903 next_i = iso->first_packet;
1904 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
1906 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
1907 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
1909 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
1910 memset(next, 0, sizeof(struct iso_xmit_cmd));
1911 next->output_more_immediate.control = cpu_to_le32(0x02000008);
1913 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
1915 /* tcode = 0xA, and sy */
1916 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
1918 /* tag and channel number */
1919 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
1921 /* transmission speed */
1922 next->iso_hdr[2] = iso->speed & 0x7;
1924 /* payload size */
1925 next->iso_hdr[6] = len & 0xFF;
1926 next->iso_hdr[7] = len >> 8;
1928 /* set up the OUTPUT_LAST */
1929 next->output_last.control = cpu_to_le32(1 << 28);
1930 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
1931 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
1932 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
1933 next->output_last.control |= cpu_to_le32(len);
1935 /* payload bus address */
1936 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
1938 /* leave branchAddress at zero for now */
1940 /* re-write the previous DMA descriptor to chain to this one */
1942 /* set prev branch address to point to next (Z=3) */
1943 prev->output_last.branchAddress = cpu_to_le32(
1944 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
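/* The low 4 bits of a branch address form the Z value: the number of
 * 16-byte descriptor slots in the block being branched to. Z=3 here
 * because, as we read the OHCI descriptor format, OUTPUT_MORE_IMMEDIATE
 * with its 8 bytes of immediate data occupies two slots and OUTPUT_LAST
 * one more. The same "| 3" appears when CommandPtr is loaded in
 * ohci_iso_xmit_start(). */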
1946 /* disable interrupt, unless required by the IRQ interval */
1947 if (prev_i % iso->irq_interval) {
1948 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
1949 } else {
1950 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
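/* Example of the modulo test above, assuming iso->irq_interval is 4:
 * descriptors at indices 0, 4, 8, ... keep their interrupt bits set
 * and all others have them cleared, so the xmit tasklet runs roughly
 * once per four packets rather than once per packet. Note that the
 * freshly queued descriptor always has its interrupt enabled (set at
 * queue time above) until a later packet is chained behind it. */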
1953 wmb();
1955 /* wake DMA in case it is sleeping */
1956 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
1958 /* issue a dummy read of the cycle timer to force all PCI
1959 writes to be posted immediately */
1960 mb();
1961 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
1963 return 0;
1966 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
1968 struct ohci_iso_xmit *xmit = iso->hostdata;
1969 struct ti_ohci *ohci = xmit->ohci;
1971 /* clear out the control register */
1972 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
1973 wmb();
1975 /* address and length of first descriptor block (Z=3) */
1976 reg_write(xmit->ohci, xmit->CommandPtr,
1977 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
1979 /* cycle match */
1980 if (cycle != -1) {
1981 u32 start = cycle & 0x1FFF;
1983 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
1984 just snarf them from the current time */
1985 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
1987 /* advance one second to give some extra time for DMA to start */
1988 seconds += 1;
1990 start |= (seconds & 3) << 13;
1992 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
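/* Worked example of the cycle-match value, under the layout implied
 * above (seconds in cycleTimer bits 31-25, 13-bit cycle count): if
 * cycle = 7900 (0x1EDC) and the timer currently reads second 5, then
 * seconds+1 = 6 and start = ((6 & 3) << 13) | 0x1EDC = 0x5EDC, so we
 * write 0x80000000 | (0x5EDC << 16), i.e. cycleMatchEnable plus the
 * 15-bit match value in bits 30-16. */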
1995 /* enable interrupts */
1996 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
1998 /* run */
1999 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2000 mb();
2002 /* wait 100 usec to give the card time to go active */
2003 udelay(100);
2005 /* check the RUN bit */
2006 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2007 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2008 reg_read(xmit->ohci, xmit->ContextControlSet));
2009 return -1;
2012 return 0;
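/* ContextControl bits used throughout this file, as implied by the
 * masks in the code (our reading of the OHCI register layout):
 * bit 15 (0x8000) run, bit 12 (0x1000) wake, bit 11 (0x800) dead,
 * bit 10 (0x400) active. So the check above writes run and then
 * verifies the controller latched it; the "& 0x800" tests in the
 * interrupt handler detect dead contexts; and ohci1394_stop_context()
 * clears run and polls active. */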
2015 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2018 switch (cmd) {
2019 case XMIT_INIT:
2020 return ohci_iso_xmit_init(iso);
2021 case XMIT_START:
2022 return ohci_iso_xmit_start(iso, arg);
2023 case XMIT_STOP:
2024 ohci_iso_xmit_stop(iso);
2025 return 0;
2026 case XMIT_QUEUE:
2027 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2028 case XMIT_SHUTDOWN:
2029 ohci_iso_xmit_shutdown(iso);
2030 return 0;
2032 case RECV_INIT:
2033 return ohci_iso_recv_init(iso);
2034 case RECV_START: {
2035 int *args = (int*) arg;
2036 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2038 case RECV_STOP:
2039 ohci_iso_recv_stop(iso);
2040 return 0;
2041 case RECV_RELEASE:
2042 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2043 return 0;
2044 case RECV_FLUSH:
2045 ohci_iso_recv_task((unsigned long) iso);
2046 return 0;
2047 case RECV_SHUTDOWN:
2048 ohci_iso_recv_shutdown(iso);
2049 return 0;
2050 case RECV_LISTEN_CHANNEL:
2051 ohci_iso_recv_change_channel(iso, arg, 1);
2052 return 0;
2053 case RECV_UNLISTEN_CHANNEL:
2054 ohci_iso_recv_change_channel(iso, arg, 0);
2055 return 0;
2056 case RECV_SET_CHANNEL_MASK:
2057 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2058 return 0;
2060 default:
2061 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2062 cmd);
2063 break;
2065 return -EINVAL;
2068 /***************************************
2069 * IEEE-1394 functionality section END *
2070 ***************************************/
2073 /********************************************************
2074 * Global stuff (interrupt handler, init/shutdown code) *
2075 ********************************************************/
2077 static void dma_trm_reset(struct dma_trm_ctx *d)
2079 unsigned long flags;
2080 LIST_HEAD(packet_list);
2081 struct ti_ohci *ohci = d->ohci;
2082 struct hpsb_packet *packet, *ptmp;
2084 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2086 /* Lock the context, reset it and release it. Move the packets
2087 * that were pending in the context to packet_list and free
2088 * them after releasing the lock. */
2090 spin_lock_irqsave(&d->lock, flags);
2092 list_splice(&d->fifo_list, &packet_list);
2093 list_splice(&d->pending_list, &packet_list);
2094 INIT_LIST_HEAD(&d->fifo_list);
2095 INIT_LIST_HEAD(&d->pending_list);
2097 d->branchAddrPtr = NULL;
2098 d->sent_ind = d->prg_ind;
2099 d->free_prgs = d->num_desc;
2101 spin_unlock_irqrestore(&d->lock, flags);
2103 if (list_empty(&packet_list))
2104 return;
2106 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2108 /* Now process subsystem callbacks for the packets from this
2109 * context. */
2110 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2111 list_del_init(&packet->driver_list);
2112 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2116 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2117 quadlet_t rx_event,
2118 quadlet_t tx_event)
2120 struct ohci1394_iso_tasklet *t;
2121 unsigned long mask;
2122 unsigned long flags;
2124 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
2126 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2127 mask = 1 << t->context;
2129 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2130 tasklet_schedule(&t->tasklet);
2131 else if (rx_event & mask)
2132 tasklet_schedule(&t->tasklet);
2135 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
2138 static irqreturn_t ohci_irq_handler(int irq, void *dev_id)
2140 quadlet_t event, node_id;
2141 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2142 struct hpsb_host *host = ohci->host;
2143 int phyid = -1, isroot = 0;
2144 unsigned long flags;
2146 /* Read and clear the interrupt event register. Don't clear
2147 * the busReset event, though. This is done when we get the
2148 * selfIDComplete interrupt. */
2149 spin_lock_irqsave(&ohci->event_lock, flags);
2150 event = reg_read(ohci, OHCI1394_IntEventClear);
2151 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2152 spin_unlock_irqrestore(&ohci->event_lock, flags);
2154 if (!event)
2155 return IRQ_NONE;
2157 /* If event is ~(u32)0, the CardBus card was ejected. In this
2158 * case we just return; cleanup happens later in
2159 * ohci1394_pci_remove(). */
2160 if (event == ~(u32) 0) {
2161 DBGMSG("Device removed.");
2162 return IRQ_NONE;
2165 DBGMSG("IntEvent: %08x", event);
2167 if (event & OHCI1394_unrecoverableError) {
2168 int ctx;
2169 PRINT(KERN_ERR, "Unrecoverable error!");
2171 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2172 PRINT(KERN_ERR, "Async Req Tx Context died: "
2173 "ctrl[%08x] cmdptr[%08x]",
2174 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2175 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2177 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2178 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2179 "ctrl[%08x] cmdptr[%08x]",
2180 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2181 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2183 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2184 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2185 "ctrl[%08x] cmdptr[%08x]",
2186 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2187 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2189 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2190 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2191 "ctrl[%08x] cmdptr[%08x]",
2192 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2193 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2195 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2196 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2197 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2198 "ctrl[%08x] cmdptr[%08x]", ctx,
2199 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2200 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2203 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2204 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2205 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2206 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2207 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2208 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2209 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2212 event &= ~OHCI1394_unrecoverableError;
2214 if (event & OHCI1394_postedWriteErr) {
2215 PRINT(KERN_ERR, "physical posted write error");
2216 /* no recovery strategy yet; it would have to involve the protocol drivers */
2217 event &= ~OHCI1394_postedWriteErr;
2219 if (event & OHCI1394_cycleTooLong) {
2220 if (printk_ratelimit())
2221 PRINT(KERN_WARNING, "isochronous cycle too long");
2222 else
2223 DBGMSG("OHCI1394_cycleTooLong");
2224 reg_write(ohci, OHCI1394_LinkControlSet,
2225 OHCI1394_LinkControl_CycleMaster);
2226 event &= ~OHCI1394_cycleTooLong;
2228 if (event & OHCI1394_cycleInconsistent) {
2229 /* We subscribe to the cycleInconsistent event only to
2230 * clear the corresponding event bit... otherwise,
2231 * isochronous cycleMatch DMA won't work. */
2232 DBGMSG("OHCI1394_cycleInconsistent");
2233 event &= ~OHCI1394_cycleInconsistent;
2235 if (event & OHCI1394_busReset) {
2236 /* The busReset event bit can't be cleared during the
2237 * selfID phase, so we disable busReset interrupts, to
2238 * avoid burying the cpu in interrupt requests. */
2239 spin_lock_irqsave(&ohci->event_lock, flags);
2240 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2242 if (ohci->check_busreset) {
2243 int loop_count = 0;
2245 udelay(10);
2247 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2248 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2250 spin_unlock_irqrestore(&ohci->event_lock, flags);
2251 udelay(10);
2252 spin_lock_irqsave(&ohci->event_lock, flags);
2254 /* The loop counter check prevents the driver from
2255 * remaining in this state forever. On the initial
2256 * bus reset the loop would otherwise continue forever
2257 * and hang the system, until some device is manually
2258 * plugged into or out of a port. The forced reset
2259 * seems to solve this problem. This mainly affects nForce2. */
2260 if (loop_count > 10000) {
2261 ohci_devctl(host, RESET_BUS, LONG_RESET);
2262 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2263 loop_count = 0;
2266 loop_count++;
2269 spin_unlock_irqrestore(&ohci->event_lock, flags);
2270 if (!host->in_bus_reset) {
2271 DBGMSG("irq_handler: Bus reset requested");
2273 /* Subsystem call */
2274 hpsb_bus_reset(ohci->host);
2276 event &= ~OHCI1394_busReset;
2278 if (event & OHCI1394_reqTxComplete) {
2279 struct dma_trm_ctx *d = &ohci->at_req_context;
2280 DBGMSG("Got reqTxComplete interrupt "
2281 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2282 if (reg_read(ohci, d->ctrlSet) & 0x800)
2283 ohci1394_stop_context(ohci, d->ctrlClear,
2284 "reqTxComplete");
2285 else
2286 dma_trm_tasklet((unsigned long)d);
2287 //tasklet_schedule(&d->task);
2288 event &= ~OHCI1394_reqTxComplete;
2290 if (event & OHCI1394_respTxComplete) {
2291 struct dma_trm_ctx *d = &ohci->at_resp_context;
2292 DBGMSG("Got respTxComplete interrupt "
2293 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2294 if (reg_read(ohci, d->ctrlSet) & 0x800)
2295 ohci1394_stop_context(ohci, d->ctrlClear,
2296 "respTxComplete");
2297 else
2298 tasklet_schedule(&d->task);
2299 event &= ~OHCI1394_respTxComplete;
2301 if (event & OHCI1394_RQPkt) {
2302 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2303 DBGMSG("Got RQPkt interrupt status=0x%08X",
2304 reg_read(ohci, d->ctrlSet));
2305 if (reg_read(ohci, d->ctrlSet) & 0x800)
2306 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2307 else
2308 tasklet_schedule(&d->task);
2309 event &= ~OHCI1394_RQPkt;
2311 if (event & OHCI1394_RSPkt) {
2312 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2313 DBGMSG("Got RSPkt interrupt status=0x%08X",
2314 reg_read(ohci, d->ctrlSet));
2315 if (reg_read(ohci, d->ctrlSet) & 0x800)
2316 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2317 else
2318 tasklet_schedule(&d->task);
2319 event &= ~OHCI1394_RSPkt;
2321 if (event & OHCI1394_isochRx) {
2322 quadlet_t rx_event;
2324 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2325 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2326 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2327 event &= ~OHCI1394_isochRx;
2329 if (event & OHCI1394_isochTx) {
2330 quadlet_t tx_event;
2332 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2333 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2334 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2335 event &= ~OHCI1394_isochTx;
2337 if (event & OHCI1394_selfIDComplete) {
2338 if (host->in_bus_reset) {
2339 node_id = reg_read(ohci, OHCI1394_NodeID);
2341 if (!(node_id & 0x80000000)) {
2342 PRINT(KERN_ERR,
2343 "SelfID received, but NodeID invalid "
2344 "(probably new bus reset occurred): %08X",
2345 node_id);
2346 goto selfid_not_valid;
2349 phyid = node_id & 0x0000003f;
2350 isroot = (node_id & 0x40000000) != 0;
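/* NodeID register layout assumed by the masks above: bit 31 is the
 * idValid flag (clear means another bus reset is already in flight),
 * bit 30 is the root flag, and the low 6 bits are our physical node
 * number. */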
2352 DBGMSG("SelfID interrupt received "
2353 "(phyid %d, %s)", phyid,
2354 (isroot ? "root" : "not root"));
2356 handle_selfid(ohci, host, phyid, isroot);
2358 /* Clear the bus reset event and re-enable the
2359 * busReset interrupt. */
2360 spin_lock_irqsave(&ohci->event_lock, flags);
2361 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2362 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2363 spin_unlock_irqrestore(&ohci->event_lock, flags);
2365 /* Turn on phys dma reception.
2367 * TODO: Enable some sort of filtering management.
2369 if (phys_dma) {
2370 reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
2371 0xffffffff);
2372 reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
2373 0xffffffff);
2376 DBGMSG("PhyReqFilter=%08x%08x",
2377 reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
2378 reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
2380 hpsb_selfid_complete(host, phyid, isroot);
2381 } else
2382 PRINT(KERN_ERR,
2383 "SelfID received outside of bus reset sequence");
2385 selfid_not_valid:
2386 event &= ~OHCI1394_selfIDComplete;
2389 /* Make sure we handle everything, just in case we accidentally
2390 * enabled an interrupt that we didn't write a handler for. */
2391 if (event)
2392 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2393 event);
2395 return IRQ_HANDLED;
2398 /* Put the buffer back into the dma context */
2399 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2401 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2402 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2404 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2405 d->prg_cpu[idx]->branchAddress &= le32_to_cpu(0xfffffff0);
2406 idx = (idx + d->num_desc - 1 ) % d->num_desc;
2407 d->prg_cpu[idx]->branchAddress |= le32_to_cpu(0x00000001);
2409 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2410 * context program descriptors before it sees the wakeup bit set. */
2411 wmb();
2413 /* wake up the dma context if necessary */
2414 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2415 PRINT(KERN_INFO,
2416 "Waking dma ctx=%d ... processing is probably too slow",
2417 d->ctx);
2420 /* do this always, to avoid a race condition */
2421 reg_write(ohci, d->ctrlSet, 0x1000);
2424 #define cond_le32_to_cpu(data, noswap) \
2425 (noswap ? data : le32_to_cpu(data))
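/* cond_le32_to_cpu() exists because some controllers hand us data that
 * is already in CPU order: when ohci->no_swap_incoming is set (see the
 * Apple UniNorth quirk in ohci1394_pci_probe()), incoming quadlets must
 * not be byte-swapped again. */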
2427 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2428 -1, 0, -1, 0, -1, -1, 16, -1};
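/* TCODE_SIZE maps a 1394 transaction code to the total number of bytes
 * that packet occupies in the receive buffer: header plus the one
 * status quadlet the AR context appends to each packet (which is why
 * hpsb_packet_received() below is given length-4). An entry of 0
 * marks the block forms, whose size must be read from the data_length
 * field in the header (the "length += 20" case: 16-byte header plus
 * the trailer quadlet); -1 presumably marks reserved/invalid tcodes. */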
2431 * Determine the length of a packet in the buffer
2432 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2434 static inline int packet_length(struct dma_rcv_ctx *d, int idx,
2435 quadlet_t *buf_ptr, int offset,
2436 unsigned char tcode, int noswap)
2438 int length = -1;
2440 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2441 length = TCODE_SIZE[tcode];
2442 if (length == 0) {
2443 if (offset + 12 >= d->buf_size) {
2444 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2445 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2446 } else {
2447 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2449 length += 20;
2451 } else if (d->type == DMA_CTX_ISO) {
2452 /* Assumption: buffer fill mode with header/trailer */
2453 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2456 if (length > 0 && length % 4)
2457 length += 4 - (length % 4);
2459 return length;
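/* The rounding above pads the byte count to a quadlet (4-byte)
 * boundary, since payloads are padded in the buffer; an equivalent
 * branch-free form would be length = (length + 3) & ~3. */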
2462 /* Tasklet that processes dma receive buffers */
2463 static void dma_rcv_tasklet (unsigned long data)
2465 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2466 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2467 unsigned int split_left, idx, offset, rescount;
2468 unsigned char tcode;
2469 int length, bytes_left, ack;
2470 unsigned long flags;
2471 quadlet_t *buf_ptr;
2472 char *split_ptr;
2473 char msg[256];
2475 spin_lock_irqsave(&d->lock, flags);
2477 idx = d->buf_ind;
2478 offset = d->buf_offset;
2479 buf_ptr = d->buf_cpu[idx] + offset/4;
2481 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2482 bytes_left = d->buf_size - rescount - offset;
2484 while (bytes_left > 0) {
2485 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2487 /* packet_length() will return < 4 for an error */
2488 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2490 if (length < 4) { /* something is wrong */
2491 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2492 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2493 d->ctx, length);
2494 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2495 spin_unlock_irqrestore(&d->lock, flags);
2496 return;
2499 /* The first case is where we have a packet that crosses
2500 * over more than one descriptor. The next case is where
2501 * it's all in the first descriptor. */
2502 if ((offset + length) > d->buf_size) {
2503 DBGMSG("Split packet rcv'd");
2504 if (length > d->split_buf_size) {
2505 ohci1394_stop_context(ohci, d->ctrlClear,
2506 "Split packet size exceeded");
2507 d->buf_ind = idx;
2508 d->buf_offset = offset;
2509 spin_unlock_irqrestore(&d->lock, flags);
2510 return;
2513 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2514 == d->buf_size) {
2515 /* The other part of the packet has not been
2516 * written yet. This should never happen; in any
2517 * case, we will pick it up on the next call. */
2518 PRINT(KERN_INFO,
2519 "Got only half a packet!");
2520 d->buf_ind = idx;
2521 d->buf_offset = offset;
2522 spin_unlock_irqrestore(&d->lock, flags);
2523 return;
2526 split_left = length;
2527 split_ptr = (char *)d->spb;
2528 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2529 split_left -= d->buf_size-offset;
2530 split_ptr += d->buf_size-offset;
2531 insert_dma_buffer(d, idx);
2532 idx = (idx+1) % d->num_desc;
2533 buf_ptr = d->buf_cpu[idx];
2534 offset=0;
2536 while (split_left >= d->buf_size) {
2537 memcpy(split_ptr,buf_ptr,d->buf_size);
2538 split_ptr += d->buf_size;
2539 split_left -= d->buf_size;
2540 insert_dma_buffer(d, idx);
2541 idx = (idx+1) % d->num_desc;
2542 buf_ptr = d->buf_cpu[idx];
2545 if (split_left > 0) {
2546 memcpy(split_ptr, buf_ptr, split_left);
2547 offset = split_left;
2548 buf_ptr += offset/4;
2550 } else {
2551 DBGMSG("Single packet rcv'd");
2552 memcpy(d->spb, buf_ptr, length);
2553 offset += length;
2554 buf_ptr += length/4;
2555 if (offset==d->buf_size) {
2556 insert_dma_buffer(d, idx);
2557 idx = (idx+1) % d->num_desc;
2558 buf_ptr = d->buf_cpu[idx];
2559 offset=0;
2563 /* We get one phy packet into the async descriptor for each
2564 * bus reset. We always ignore it. */
2565 if (tcode != OHCI1394_TCODE_PHY) {
2566 if (!ohci->no_swap_incoming)
2567 header_le32_to_cpu(d->spb, tcode);
2568 DBGMSG("Packet received from node"
2569 " %d ack=0x%02X spd=%d tcode=0x%X"
2570 " length=%d ctx=%d tlabel=%d",
2571 (d->spb[1]>>16)&0x3f,
2572 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2573 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2574 tcode, length, d->ctx,
2575 (d->spb[0]>>10)&0x3f);
2577 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2578 == 0x11) ? 1 : 0;
2580 hpsb_packet_received(ohci->host, d->spb,
2581 length-4, ack);
2583 #ifdef OHCI1394_DEBUG
2584 else
2585 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2586 d->ctx);
2587 #endif
2589 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2591 bytes_left = d->buf_size - rescount - offset;
2595 d->buf_ind = idx;
2596 d->buf_offset = offset;
2598 spin_unlock_irqrestore(&d->lock, flags);
2601 /* Bottom half that processes sent packets */
2602 static void dma_trm_tasklet (unsigned long data)
2604 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2605 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2606 struct hpsb_packet *packet, *ptmp;
2607 unsigned long flags;
2608 u32 status, ack;
2609 size_t datasize;
2611 spin_lock_irqsave(&d->lock, flags);
2613 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2614 datasize = packet->data_size;
2615 if (datasize && packet->type != hpsb_raw)
2616 status = le32_to_cpu(
2617 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2618 else
2619 status = le32_to_cpu(
2620 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2622 if (status == 0)
2623 /* this packet hasn't been sent yet */
2624 break;
2626 #ifdef OHCI1394_DEBUG
2627 if (datasize)
2628 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2629 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2630 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2631 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2632 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2633 status&0x1f, (status>>5)&0x3,
2634 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2635 d->ctx);
2636 else
2637 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2638 "%d ack=0x%X spd=%d dataLength=%d ctx=%d",
2639 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2640 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2641 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2642 status&0x1f, (status>>5)&0x3,
2643 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2644 d->ctx);
2645 else
2646 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2647 "%d ack=0x%X spd=%d data=0x%08X ctx=%d",
2648 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2649 >>16)&0x3f,
2650 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2651 >>4)&0xf,
2652 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2653 >>10)&0x3f,
2654 status&0x1f, (status>>5)&0x3,
2655 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2656 d->ctx);
2657 #endif
2659 if (status & 0x10) {
2660 ack = status & 0xf;
2661 } else {
2662 switch (status & 0x1f) {
2663 case EVT_NO_STATUS: /* that should never happen */
2664 case EVT_RESERVED_A: /* that should never happen */
2665 case EVT_LONG_PACKET: /* that should never happen */
2666 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2667 ack = ACKX_SEND_ERROR;
2668 break;
2669 case EVT_MISSING_ACK:
2670 ack = ACKX_TIMEOUT;
2671 break;
2672 case EVT_UNDERRUN:
2673 ack = ACKX_SEND_ERROR;
2674 break;
2675 case EVT_OVERRUN: /* that should never happen */
2676 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2677 ack = ACKX_SEND_ERROR;
2678 break;
2679 case EVT_DESCRIPTOR_READ:
2680 case EVT_DATA_READ:
2681 case EVT_DATA_WRITE:
2682 ack = ACKX_SEND_ERROR;
2683 break;
2684 case EVT_BUS_RESET: /* that should never happen */
2685 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2686 ack = ACKX_SEND_ERROR;
2687 break;
2688 case EVT_TIMEOUT:
2689 ack = ACKX_TIMEOUT;
2690 break;
2691 case EVT_TCODE_ERR:
2692 ack = ACKX_SEND_ERROR;
2693 break;
2694 case EVT_RESERVED_B: /* that should never happen */
2695 case EVT_RESERVED_C: /* that should never happen */
2696 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2697 ack = ACKX_SEND_ERROR;
2698 break;
2699 case EVT_UNKNOWN:
2700 case EVT_FLUSHED:
2701 ack = ACKX_SEND_ERROR;
2702 break;
2703 default:
2704 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2705 ack = ACKX_SEND_ERROR;
2706 BUG();
2710 list_del_init(&packet->driver_list);
2711 hpsb_packet_sent(ohci->host, packet, ack);
2713 if (datasize)
2714 pci_unmap_single(ohci->dev,
2715 cpu_to_le32(d->prg_cpu[d->sent_ind]->end.address),
2716 datasize, PCI_DMA_TODEVICE);
2718 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2719 d->free_prgs++;
2722 dma_trm_flush(ohci, d);
2724 spin_unlock_irqrestore(&d->lock, flags);
2727 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2729 int i;
2730 struct ti_ohci *ohci = d->ohci;
2732 if (ohci == NULL)
2733 return;
2735 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2737 if (d->buf_cpu) {
2738 for (i=0; i<d->num_desc; i++)
2739 if (d->buf_cpu[i] && d->buf_bus[i])
2740 pci_free_consistent(
2741 ohci->dev, d->buf_size,
2742 d->buf_cpu[i], d->buf_bus[i]);
2743 kfree(d->buf_cpu);
2744 kfree(d->buf_bus);
2746 if (d->prg_cpu) {
2747 for (i=0; i<d->num_desc; i++)
2748 if (d->prg_cpu[i] && d->prg_bus[i])
2749 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2750 d->prg_bus[i]);
2751 pci_pool_destroy(d->prg_pool);
2752 kfree(d->prg_cpu);
2753 kfree(d->prg_bus);
2755 kfree(d->spb);
2757 /* Mark this context as freed. */
2758 d->ohci = NULL;
2761 static int
2762 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2763 enum context_type type, int ctx, int num_desc,
2764 int buf_size, int split_buf_size, int context_base)
2766 int i, len;
2767 static int num_allocs;
2768 static char pool_name[20];
2770 d->ohci = ohci;
2771 d->type = type;
2772 d->ctx = ctx;
2774 d->num_desc = num_desc;
2775 d->buf_size = buf_size;
2776 d->split_buf_size = split_buf_size;
2778 d->ctrlSet = 0;
2779 d->ctrlClear = 0;
2780 d->cmdPtr = 0;
2782 d->buf_cpu = kzalloc(d->num_desc * sizeof(*d->buf_cpu), GFP_ATOMIC);
2783 d->buf_bus = kzalloc(d->num_desc * sizeof(*d->buf_bus), GFP_ATOMIC);
2785 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2786 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2787 free_dma_rcv_ctx(d);
2788 return -ENOMEM;
2791 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_ATOMIC);
2792 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_ATOMIC);
2794 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2795 PRINT(KERN_ERR, "Failed to allocate dma prg");
2796 free_dma_rcv_ctx(d);
2797 return -ENOMEM;
2800 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2802 if (d->spb == NULL) {
2803 PRINT(KERN_ERR, "Failed to allocate split buffer");
2804 free_dma_rcv_ctx(d);
2805 return -ENOMEM;
2808 len = sprintf(pool_name, "ohci1394_rcv_prg");
2809 sprintf(pool_name+len, "%d", num_allocs);
2810 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2811 sizeof(struct dma_cmd), 4, 0);
2812 if (d->prg_pool == NULL)
2814 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2815 free_dma_rcv_ctx(d);
2816 return -ENOMEM;
2818 num_allocs++;
2820 for (i=0; i<d->num_desc; i++) {
2821 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2822 d->buf_size,
2823 d->buf_bus+i);
2825 if (d->buf_cpu[i] != NULL) {
2826 memset(d->buf_cpu[i], 0, d->buf_size);
2827 } else {
2828 PRINT(KERN_ERR,
2829 "Failed to allocate dma buffer");
2830 free_dma_rcv_ctx(d);
2831 return -ENOMEM;
2834 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2836 if (d->prg_cpu[i] != NULL) {
2837 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2838 } else {
2839 PRINT(KERN_ERR,
2840 "Failed to allocate dma prg");
2841 free_dma_rcv_ctx(d);
2842 return -ENOMEM;
2846 spin_lock_init(&d->lock);
2848 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2849 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2850 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2852 tasklet_init(&d->task, dma_rcv_tasklet, (unsigned long) d);
2853 return 0;
2856 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
2858 int i;
2859 struct ti_ohci *ohci = d->ohci;
2861 if (ohci == NULL)
2862 return;
2864 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
2866 if (d->prg_cpu) {
2867 for (i=0; i<d->num_desc; i++)
2868 if (d->prg_cpu[i] && d->prg_bus[i])
2869 pci_pool_free(d->prg_pool, d->prg_cpu[i],
2870 d->prg_bus[i]);
2871 pci_pool_destroy(d->prg_pool);
2872 kfree(d->prg_cpu);
2873 kfree(d->prg_bus);
2876 /* Mark this context as freed. */
2877 d->ohci = NULL;
2880 static int
2881 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
2882 enum context_type type, int ctx, int num_desc,
2883 int context_base)
2885 int i, len;
2886 static char pool_name[20];
2887 static int num_allocs=0;
2889 d->ohci = ohci;
2890 d->type = type;
2891 d->ctx = ctx;
2892 d->num_desc = num_desc;
2893 d->ctrlSet = 0;
2894 d->ctrlClear = 0;
2895 d->cmdPtr = 0;
2897 d->prg_cpu = kzalloc(d->num_desc * sizeof(*d->prg_cpu), GFP_KERNEL);
2898 d->prg_bus = kzalloc(d->num_desc * sizeof(*d->prg_bus), GFP_KERNEL);
2900 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2901 PRINT(KERN_ERR, "Failed to allocate at dma prg");
2902 free_dma_trm_ctx(d);
2903 return -ENOMEM;
2906 len = sprintf(pool_name, "ohci1394_trm_prg");
2907 sprintf(pool_name+len, "%d", num_allocs);
2908 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2909 sizeof(struct at_dma_prg), 4, 0);
2910 if (d->prg_pool == NULL) {
2911 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2912 free_dma_trm_ctx(d);
2913 return -ENOMEM;
2915 num_allocs++;
2917 for (i = 0; i < d->num_desc; i++) {
2918 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, GFP_KERNEL, d->prg_bus+i);
2920 if (d->prg_cpu[i] != NULL) {
2921 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
2922 } else {
2923 PRINT(KERN_ERR,
2924 "Failed to allocate at dma prg");
2925 free_dma_trm_ctx(d);
2926 return -ENOMEM;
2930 spin_lock_init(&d->lock);
2932 /* initialize tasklet */
2933 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
2934 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
2935 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
2936 tasklet_init(&d->task, dma_trm_tasklet, (unsigned long)d);
2937 return 0;
2940 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
2942 struct ti_ohci *ohci = host->hostdata;
2944 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
2945 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
2947 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
2951 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
2952 quadlet_t data, quadlet_t compare)
2954 struct ti_ohci *ohci = host->hostdata;
2955 int i;
2957 reg_write(ohci, OHCI1394_CSRData, data);
2958 reg_write(ohci, OHCI1394_CSRCompareData, compare);
2959 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
2961 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
2962 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
2963 break;
2965 mdelay(1);
2968 return reg_read(ohci, OHCI1394_CSRData);
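/* This implements the compare-swap access for the serial-bus CSRs
 * that the controller manages in hardware: the data and compare
 * values are loaded, "reg & 0x3" presumably selects one of the four
 * CSRs (bus_manager_ID, bandwidth_available, channels_available
 * hi/lo), and bit 31 of CSRControl signals completion, polled for at
 * most OHCI_LOOP_COUNT milliseconds. On timeout we simply return
 * whatever CSRData holds. */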
2971 static struct hpsb_host_driver ohci1394_driver = {
2972 .owner = THIS_MODULE,
2973 .name = OHCI1394_DRIVER_NAME,
2974 .set_hw_config_rom = ohci_set_hw_config_rom,
2975 .transmit_packet = ohci_transmit,
2976 .devctl = ohci_devctl,
2977 .isoctl = ohci_isoctl,
2978 .hw_csr_reg = ohci_hw_csr_reg,
2981 /***********************************
2982 * PCI Driver Interface functions *
2983 ***********************************/
2985 #define FAIL(err, fmt, args...) \
2986 do { \
2987 PRINT_G(KERN_ERR, fmt , ## args); \
2988 ohci1394_pci_remove(dev); \
2989 return err; \
2990 } while (0)
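/* FAIL relies on ohci1394_pci_remove() being safe to call at any point
 * during probe: ohci->init_state records how far initialization got,
 * and the switch in the remove function falls through from that state
 * downward, undoing only what was actually set up. */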
2992 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
2993 const struct pci_device_id *ent)
2995 struct hpsb_host *host;
2996 struct ti_ohci *ohci; /* shortcut to currently handled device */
2997 resource_size_t ohci_base;
2999 #ifdef CONFIG_PPC_PMAC
3000 /* Necessary on some machines if ohci1394 was loaded/unloaded before */
3001 if (machine_is(powermac)) {
3002 struct device_node *ofn = pci_device_to_OF_node(dev);
3004 if (ofn) {
3005 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3006 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3009 #endif /* CONFIG_PPC_PMAC */
3011 if (pci_enable_device(dev))
3012 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3013 pci_set_master(dev);
3015 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3016 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3018 ohci = host->hostdata;
3019 ohci->dev = dev;
3020 ohci->host = host;
3021 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3022 host->pdev = dev;
3023 pci_set_drvdata(dev, ohci);
3025 /* We don't want hardware swapping */
3026 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3028 /* Some oddball Apple controllers do not byte-order the self-ID
3029 * data properly, so we make up for it here. */
3030 #ifndef __LITTLE_ENDIAN
3031 /* XXX: Need a better way to check this. I'm wondering if we can
3032 * read the values of the OHCI1394_PCI_HCI_Control and the
3033 * noByteSwapData registers to see if they were not cleared to
3034 * zero. Should this work? Obviously it's not defined what these
3035 * registers will read when they aren't supported. Bleh! */
3036 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3037 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3038 ohci->no_swap_incoming = 1;
3039 ohci->selfid_swap = 0;
3040 } else
3041 ohci->selfid_swap = 1;
3042 #endif
3045 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3046 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3047 #endif
3049 /* These chipsets require a bit of extra care when checking after
3050 * a busreset. */
3051 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3052 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3053 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3054 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3055 ohci->check_busreset = 1;
3057 /* We hardwire the MMIO length, since some CardBus adapters
3058 * fail to report the right length. Anyway, the OHCI spec
3059 * clearly says it's 2 KB, so this shouldn't be a problem. */
3060 ohci_base = pci_resource_start(dev, 0);
3061 if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
3062 PRINT(KERN_WARNING, "PCI resource length of 0x%llx too small!",
3063 (unsigned long long)pci_resource_len(dev, 0));
3065 if (!request_mem_region(ohci_base, OHCI1394_REGISTER_SIZE,
3066 OHCI1394_DRIVER_NAME))
3067 FAIL(-ENOMEM, "MMIO resource (0x%llx - 0x%llx) unavailable",
3068 (unsigned long long)ohci_base,
3069 (unsigned long long)ohci_base + OHCI1394_REGISTER_SIZE);
3070 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3072 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3073 if (ohci->registers == NULL)
3074 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3075 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3076 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3078 /* csr_config rom allocation */
3079 ohci->csr_config_rom_cpu =
3080 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3081 &ohci->csr_config_rom_bus);
3082 if (ohci->csr_config_rom_cpu == NULL)
3083 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3084 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3086 /* self-id dma buffer allocation */
3087 ohci->selfid_buf_cpu =
3088 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3089 &ohci->selfid_buf_bus);
3090 if (ohci->selfid_buf_cpu == NULL)
3091 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3092 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3094 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3095 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3096 "8Kb boundary... may cause problems on some CXD3222 chip",
3097 ohci->selfid_buf_cpu);
3099 /* No self-id errors at startup */
3100 ohci->self_id_errors = 0;
3102 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3103 /* AR DMA request context allocation */
3104 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3105 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3106 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3107 OHCI1394_AsReqRcvContextBase) < 0)
3108 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3110 /* AR DMA response context allocation */
3111 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3112 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3113 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3114 OHCI1394_AsRspRcvContextBase) < 0)
3115 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3117 /* AT DMA request context */
3118 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3119 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3120 OHCI1394_AsReqTrContextBase) < 0)
3121 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3123 /* AT DMA response context */
3124 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3125 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3126 OHCI1394_AsRspTrContextBase) < 0)
3127 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3129 /* Start off with a soft reset, to clear everything to a sane
3130 * state. */
3131 ohci_soft_reset(ohci);
3133 /* Now enable LPS, which we need in order to start accessing
3134 * most of the registers. In fact, on some cards (ALI M5251),
3135 * accessing registers in the SClk domain without LPS enabled
3136 * will lock up the machine. Wait 50msec to make sure we have
3137 * full link enabled. */
3138 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3140 /* Disable and clear interrupts */
3141 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3142 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3144 mdelay(50);
3146 /* Determine the number of available IR and IT contexts. */
3147 ohci->nb_iso_rcv_ctx =
3148 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3149 ohci->nb_iso_xmit_ctx =
3150 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3152 /* Set the usage bits for non-existent contexts so they can't
3153 * be allocated */
3154 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3155 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
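/* Example: with 4 receive contexts, ir_ctx_usage becomes ~0 << 4 =
 * 0x...fffffff0, so the test_and_set_bit() loop in
 * ohci1394_register_iso_tasklet() can only ever claim bits 0-3. */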
3157 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3158 spin_lock_init(&ohci->iso_tasklet_list_lock);
3159 ohci->ISO_channel_usage = 0;
3160 spin_lock_init(&ohci->IR_channel_lock);
3162 spin_lock_init(&ohci->event_lock);
3165 * Interrupts are disabled at this point, but due to IRQF_SHARED our
3166 * handler might get called anyway. It will see no event, of course,
3167 * but it needs to get as far as that "no event" answer, so enough
3168 * must be initialized by this point.
3170 if (request_irq(dev->irq, ohci_irq_handler, IRQF_SHARED,
3171 OHCI1394_DRIVER_NAME, ohci))
3172 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3174 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3175 ohci_initialize(ohci);
3177 /* Set certain csr values */
3178 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3179 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3180 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3181 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3182 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3184 if (phys_dma) {
3185 host->low_addr_space =
3186 (u64) reg_read(ohci, OHCI1394_PhyUpperBound) << 16;
3187 if (!host->low_addr_space)
3188 host->low_addr_space = OHCI1394_PHYS_UPPER_BOUND_FIXED;
3190 host->middle_addr_space = OHCI1394_MIDDLE_ADDRESS_SPACE;
3192 /* Tell the highlevel this host is ready */
3193 if (hpsb_add_host(host))
3194 FAIL(-ENOMEM, "Failed to register host with highlevel");
3196 ohci->init_state = OHCI_INIT_DONE;
3198 return 0;
3199 #undef FAIL
3202 static void ohci1394_pci_remove(struct pci_dev *pdev)
3204 struct ti_ohci *ohci;
3205 struct device *dev;
3207 ohci = pci_get_drvdata(pdev);
3208 if (!ohci)
3209 return;
3211 dev = get_device(&ohci->host->device);
3213 switch (ohci->init_state) {
3214 case OHCI_INIT_DONE:
3215 hpsb_remove_host(ohci->host);
3217 /* Clear out BUS Options */
3218 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3219 reg_write(ohci, OHCI1394_BusOptions,
3220 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3221 0x00ff0000);
3222 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3224 case OHCI_INIT_HAVE_IRQ:
3225 /* Clear interrupt registers */
3226 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3227 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3228 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3229 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3230 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3231 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3233 /* Disable IRM Contender */
3234 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3236 /* Clear link control register */
3237 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3239 /* Let all other nodes know to ignore us */
3240 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3242 /* Soft reset before we start - this disables
3243 * interrupts and clears linkEnable and LPS. */
3244 ohci_soft_reset(ohci);
3245 free_irq(ohci->dev->irq, ohci);
3247 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3248 /* ohci_soft_reset() already stops all DMA contexts, so we
3249 * don't need to stop them individually here. */
3250 free_dma_rcv_ctx(&ohci->ar_req_context);
3251 free_dma_rcv_ctx(&ohci->ar_resp_context);
3252 free_dma_trm_ctx(&ohci->at_req_context);
3253 free_dma_trm_ctx(&ohci->at_resp_context);
3255 case OHCI_INIT_HAVE_SELFID_BUFFER:
3256 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3257 ohci->selfid_buf_cpu,
3258 ohci->selfid_buf_bus);
3260 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3261 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3262 ohci->csr_config_rom_cpu,
3263 ohci->csr_config_rom_bus);
3265 case OHCI_INIT_HAVE_IOMAPPING:
3266 iounmap(ohci->registers);
3268 case OHCI_INIT_HAVE_MEM_REGION:
3269 release_mem_region(pci_resource_start(ohci->dev, 0),
3270 OHCI1394_REGISTER_SIZE);
3272 #ifdef CONFIG_PPC_PMAC
3273 /* On UniNorth, power down the cable and turn off the chip clock
3274 * to save power on laptops */
3275 if (machine_is(powermac)) {
3276 struct device_node* ofn = pci_device_to_OF_node(ohci->dev);
3278 if (ofn) {
3279 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3280 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3283 #endif /* CONFIG_PPC_PMAC */
3285 case OHCI_INIT_ALLOC_HOST:
3286 pci_set_drvdata(ohci->dev, NULL);
3289 if (dev)
3290 put_device(dev);
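/* Note that the switch above deliberately has no break statements:
 * starting from the recorded init_state, every case falls through to
 * the cleanup for all earlier initialization stages, mirroring
 * ohci1394_pci_probe() in reverse. */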
3293 #ifdef CONFIG_PM
3294 static int ohci1394_pci_suspend(struct pci_dev *pdev, pm_message_t state)
3296 int err;
3297 struct ti_ohci *ohci = pci_get_drvdata(pdev);
3299 if (!ohci) {
3300 printk(KERN_ERR "%s: tried to suspend nonexistent host\n",
3301 OHCI1394_DRIVER_NAME);
3302 return -ENXIO;
3304 DBGMSG("suspend called");
3306 /* Clear the async DMA contexts and stop using the controller */
3307 hpsb_bus_reset(ohci->host);
3309 /* See ohci1394_pci_remove() for comments on this sequence */
3310 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3311 reg_write(ohci, OHCI1394_BusOptions,
3312 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3313 0x00ff0000);
3314 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3315 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3316 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3317 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3318 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3319 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3320 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3321 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3322 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3323 ohci_soft_reset(ohci);
3325 err = pci_save_state(pdev);
3326 if (err) {
3327 PRINT(KERN_ERR, "pci_save_state failed with %d", err);
3328 return err;
3330 err = pci_set_power_state(pdev, pci_choose_state(pdev, state));
3331 if (err)
3332 DBGMSG("pci_set_power_state failed with %d", err);
3334 /* PowerMac suspend code comes last */
3335 #ifdef CONFIG_PPC_PMAC
3336 if (machine_is(powermac)) {
3337 struct device_node *ofn = pci_device_to_OF_node(pdev);
3339 if (ofn)
3340 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3342 #endif /* CONFIG_PPC_PMAC */
3344 return 0;
3347 static int ohci1394_pci_resume(struct pci_dev *pdev)
3349 int err;
3350 struct ti_ohci *ohci = pci_get_drvdata(pdev);
3352 if (!ohci) {
3353 printk(KERN_ERR "%s: tried to resume nonexistent host\n",
3354 OHCI1394_DRIVER_NAME);
3355 return -ENXIO;
3357 DBGMSG("resume called");
3359 /* PowerMac resume code comes first */
3360 #ifdef CONFIG_PPC_PMAC
3361 if (machine_is(powermac)) {
3362 struct device_node *ofn = pci_device_to_OF_node(pdev);
3364 if (ofn)
3365 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3367 #endif /* CONFIG_PPC_PMAC */
3369 pci_set_power_state(pdev, PCI_D0);
3370 pci_restore_state(pdev);
3371 err = pci_enable_device(pdev);
3372 if (err) {
3373 PRINT(KERN_ERR, "pci_enable_device failed with %d", err);
3374 return err;
3377 /* See ohci1394_pci_probe() for comments on this sequence */
3378 ohci_soft_reset(ohci);
3379 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3380 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3381 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3382 mdelay(50);
3383 ohci_initialize(ohci);
3385 hpsb_resume_host(ohci->host);
3386 return 0;
3388 #endif /* CONFIG_PM */
3390 static struct pci_device_id ohci1394_pci_tbl[] = {
3392 .class = PCI_CLASS_SERIAL_FIREWIRE_OHCI,
3393 .class_mask = PCI_ANY_ID,
3394 .vendor = PCI_ANY_ID,
3395 .device = PCI_ANY_ID,
3396 .subvendor = PCI_ANY_ID,
3397 .subdevice = PCI_ANY_ID,
3399 { 0, },
3402 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3404 static struct pci_driver ohci1394_pci_driver = {
3405 .name = OHCI1394_DRIVER_NAME,
3406 .id_table = ohci1394_pci_tbl,
3407 .probe = ohci1394_pci_probe,
3408 .remove = ohci1394_pci_remove,
3409 #ifdef CONFIG_PM
3410 .resume = ohci1394_pci_resume,
3411 .suspend = ohci1394_pci_suspend,
3412 #endif
3415 /***********************************
3416 * OHCI1394 Video Interface *
3417 ***********************************/
3419 /* essentially the only purpose of this code is to allow another
3420 module to hook into ohci's interrupt handler */
3422 /* returns zero if successful, one if DMA context is locked up */
3423 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3425 int i=0;
3427 /* stop the channel program if it's still running */
3428 reg_write(ohci, reg, 0x8000);
3430 /* Wait until it effectively stops */
3431 while (reg_read(ohci, reg) & 0x400) {
3432 i++;
3433 if (i>5000) {
3434 PRINT(KERN_ERR,
3435 "Runaway loop while stopping context: %s...", msg ? msg : "");
3436 return 1;
3439 mb();
3440 udelay(10);
3442 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3443 return 0;
3446 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3447 void (*func)(unsigned long), unsigned long data)
3449 tasklet_init(&tasklet->tasklet, func, data);
3450 tasklet->type = type;
3451 /* We init the tasklet->link field, so we can list_del() it
3452 * without worrying whether it was added to the list or not. */
3453 INIT_LIST_HEAD(&tasklet->link);
3456 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3457 struct ohci1394_iso_tasklet *tasklet)
3459 unsigned long flags, *usage;
3460 int n, i, r = -EBUSY;
3462 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3463 n = ohci->nb_iso_xmit_ctx;
3464 usage = &ohci->it_ctx_usage;
3466 else {
3467 n = ohci->nb_iso_rcv_ctx;
3468 usage = &ohci->ir_ctx_usage;
3470 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3471 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3472 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3473 return r;
3478 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3480 for (i = 0; i < n; i++)
3481 if (!test_and_set_bit(i, usage)) {
3482 tasklet->context = i;
3483 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3484 r = 0;
3485 break;
3488 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3490 return r;
3493 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3494 struct ohci1394_iso_tasklet *tasklet)
3496 unsigned long flags;
3498 tasklet_kill(&tasklet->tasklet);
3500 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3502 if (tasklet->type == OHCI_ISO_TRANSMIT)
3503 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3504 else {
3505 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3507 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3508 clear_bit(0, &ohci->ir_multichannel_used);
3512 list_del(&tasklet->link);
3514 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3517 EXPORT_SYMBOL(ohci1394_stop_context);
3518 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3519 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3520 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
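/* For reference, a minimal sketch of how a client module might use the
 * iso tasklet hooks exported above; everything except the ohci1394_*
 * calls and the OHCI_ISO_TRANSMIT type is illustrative:
 *
 *	static void my_iso_fn(unsigned long data)
 *	{
 *		struct my_ctx *ctx = (struct my_ctx *)data;
 *		// drain the context's DMA buffers here
 *	}
 *
 *	static struct ohci1394_iso_tasklet my_tasklet;
 *
 *	ohci1394_init_iso_tasklet(&my_tasklet, OHCI_ISO_TRANSMIT,
 *				  my_iso_fn, (unsigned long)ctx);
 *	if (ohci1394_register_iso_tasklet(ohci, &my_tasklet) < 0)
 *		return -EBUSY;	// no free context of that type
 *	// my_tasklet.context now holds the allocated context number
 *	...
 *	ohci1394_unregister_iso_tasklet(ohci, &my_tasklet);
 */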
3522 /***********************************
3523 * General module initialization *
3524 ***********************************/
3526 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3527 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3528 MODULE_LICENSE("GPL");
3530 static void __exit ohci1394_cleanup (void)
3532 pci_unregister_driver(&ohci1394_pci_driver);
3535 static int __init ohci1394_init(void)
3537 return pci_register_driver(&ohci1394_pci_driver);
3540 /* Register before most other device drivers.
3541 * Useful for remote debugging via physical DMA, e.g. using firescope. */
3542 fs_initcall(ohci1394_init);
3543 module_exit(ohci1394_cleanup);