/*
 * pcilynx.c - Texas Instruments PCILynx driver
 * Copyright (C) 1999,2000 Andreas Bombe <andreas.bombe@munich.netsurf.de>,
 *                         Stephan Linz <linz@mazet.de>
 *                         Manfred Weihs <weihs@ict.tuwien.ac.at>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Contributions:
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *        reading bus info block (containing GUID) from serial
 *        eeprom via i2c and storing it in config ROM
 *        Reworked code for initiating bus resets
 *        (long, short, with or without hold-off)
 *        Enhancements in async and iso send code
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kdev_t.h>
#include <linux/dma-mapping.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/irq.h>

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "pcilynx.h"

#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
/* print card specific information */
#define PRINT(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define PRINT_GD(level, fmt, args...) printk(level "pcilynx: " fmt "\n" , ## args)
#define PRINTD(level, card, fmt, args...) printk(level "pcilynx%d: " fmt "\n" , card , ## args)
#else
#define PRINT_GD(level, fmt, args...) do {} while (0)
#define PRINTD(level, card, fmt, args...) do {} while (0)
#endif


/* Module Parameters */
static int skip_eeprom = 0;
module_param(skip_eeprom, int, 0444);
MODULE_PARM_DESC(skip_eeprom, "Use generic bus info block instead of serial eeprom (default = 0).");


static struct hpsb_host_driver lynx_driver;
static unsigned int card_id;


/*
 * I2C stuff
 */

/* the i2c stuff was inspired by i2c-philips-par.c */
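
/*
 * The PCILynx exposes the serial EEPROM's I2C lines through the
 * SERIAL_EEPROM_CONTROL register: bit 0x00000040 drives/reads SCL and bit
 * 0x00000010 drives/reads SDA, so the generic i2c-algo-bit layer can
 * bit-bang the bus.  add_card() uses this adapter to read the bus info
 * block (including the GUID) from the EEPROM.
 */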

static void bit_setscl(void *data, int state)
{
	if (state) {
		  ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000040;
	} else {
		  ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000040;
	}
	reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
}

static void bit_setsda(void *data, int state)
{
	if (state) {
		  ((struct ti_lynx *) data)->i2c_driven_state |= 0x00000010;
	} else {
		  ((struct ti_lynx *) data)->i2c_driven_state &= ~0x00000010;
	}
	reg_write((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL, ((struct ti_lynx *) data)->i2c_driven_state);
}

static int bit_getscl(void *data)
{
	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000040;
}

static int bit_getsda(void *data)
{
	return reg_read((struct ti_lynx *) data, SERIAL_EEPROM_CONTROL) & 0x00000010;
}

static int bit_reg(struct i2c_client *client)
{
	return 0;
}

static int bit_unreg(struct i2c_client *client)
{
	return 0;
}

static struct i2c_algo_bit_data bit_data = {
	.setsda			= bit_setsda,
	.setscl			= bit_setscl,
	.getsda			= bit_getsda,
	.getscl			= bit_getscl,
	.udelay			= 5,
	.mdelay			= 5,
	.timeout		= 100,
};

static struct i2c_adapter bit_ops = {
	.id			= 0xAA, //FIXME: probably we should get an id in i2c-id.h
	.client_register	= bit_reg,
	.client_unregister	= bit_unreg,
	.name			= "PCILynx I2C",
};


/*
 * PCL handling functions.
 */
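
/*
 * PCLs are the DMA descriptors the PCILynx works through.  The driver keeps
 * them in the pcl_mem buffer allocated in add_card(); pcl_bmap holds one bit
 * per PCL slot.  alloc_pcl() returns the index of the first free slot, or -1
 * if the bitmap is full; free_pcl() (currently compiled out) gives it back.
 */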

static pcl_t alloc_pcl(struct ti_lynx *lynx)
{
	u8 m;
	int i, j;

	spin_lock(&lynx->lock);
	/* FIXME - use ffz() to make this readable */
	for (i = 0; i < (LOCALRAM_SIZE / 1024); i++) {
		m = lynx->pcl_bmap[i];
		for (j = 0; j < 8; j++) {
			if (m & 1<<j) {
				continue;
			}
			m |= 1<<j;
			lynx->pcl_bmap[i] = m;
			spin_unlock(&lynx->lock);
			return 8 * i + j;
		}
	}
	spin_unlock(&lynx->lock);

	return -1;
}


#if 0
static void free_pcl(struct ti_lynx *lynx, pcl_t pclid)
{
	int off, bit;

	off = pclid / 8;
	bit = pclid % 8;

	if (pclid < 0) {
		return;
	}

	spin_lock(&lynx->lock);
	if (lynx->pcl_bmap[off] & 1<<bit) {
		lynx->pcl_bmap[off] &= ~(1<<bit);
	} else {
		PRINT(KERN_ERR, lynx->id,
		      "attempted to free unallocated PCL %d", pclid);
	}
	spin_unlock(&lynx->lock);
}

/* functions useful for debugging */
static void pretty_print_pcl(const struct ti_pcl *pcl)
{
	int i;

	printk("PCL next %08x, userdata %08x, status %08x, remtrans %08x, nextbuf %08x\n",
	       pcl->next, pcl->user_data, pcl->pcl_status,
	       pcl->remaining_transfer_count, pcl->next_data_buffer);

	printk("PCL");
	for (i=0; i<13; i++) {
		printk(" c%x:%08x d%x:%08x",
		       i, pcl->buffer[i].control, i, pcl->buffer[i].pointer);
		if (!(i & 0x3) && (i != 12)) printk("\nPCL");
	}
	printk("\n");
}

static void print_pcl(const struct ti_lynx *lynx, pcl_t pclid)
{
	struct ti_pcl pcl;

	get_pcl(lynx, pclid, &pcl);
	pretty_print_pcl(&pcl);
}
#endif


/***********************************
 * IEEE-1394 functionality section *
 ***********************************/
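
/*
 * PHY registers are not memory mapped; they are accessed indirectly through
 * LINK_PHY.  get_phy_reg() issues a read command for the address and polls
 * LINK_PHY until the echoed register address matches (giving up after 10000
 * iterations); set_phy_reg() simply issues a write command.
 */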

static int get_phy_reg(struct ti_lynx *lynx, int addr)
{
	int retval;
	int i = 0;

	unsigned long flags;

	if (addr > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register address %d out of range",
		      __FUNCTION__, addr);
		return -1;
	}

	spin_lock_irqsave(&lynx->phy_reg_lock, flags);

	reg_write(lynx, LINK_PHY, LINK_PHY_READ | LINK_PHY_ADDR(addr));
	do {
		retval = reg_read(lynx, LINK_PHY);

		if (i > 10000) {
			PRINT(KERN_ERR, lynx->id, "%s: runaway loop, aborting",
			      __FUNCTION__);
			retval = -1;
			break;
		}
		i++;
	} while ((retval & 0xf00) != LINK_PHY_RADDR(addr));

	reg_write(lynx, LINK_INT_STATUS, LINK_INT_PHY_REG_RCVD);
	spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);

	if (retval != -1) {
		return retval & 0xff;
	} else {
		return -1;
	}
}

static int set_phy_reg(struct ti_lynx *lynx, int addr, int val)
{
	unsigned long flags;

	if (addr > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register address %d out of range", __FUNCTION__, addr);
		return -1;
	}

	if (val > 0xff) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY register value %d out of range", __FUNCTION__, val);
		return -1;
	}

	spin_lock_irqsave(&lynx->phy_reg_lock, flags);

	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE | LINK_PHY_ADDR(addr)
		  | LINK_PHY_WDATA(val));

	spin_unlock_irqrestore(&lynx->phy_reg_lock, flags);

	return 0;
}

static int sel_phy_reg_page(struct ti_lynx *lynx, int page)
{
	int reg;

	if (page > 7) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY page %d out of range", __FUNCTION__, page);
		return -1;
	}

	reg = get_phy_reg(lynx, 7);
	if (reg != -1) {
		reg &= 0x1f;
		reg |= (page << 5);
		set_phy_reg(lynx, 7, reg);
		return 0;
	} else {
		return -1;
	}
}

#if 0 /* not needed at this time */
static int sel_phy_reg_port(struct ti_lynx *lynx, int port)
{
	int reg;

	if (port > 15) {
		PRINT(KERN_ERR, lynx->id,
		      "%s: PHY port %d out of range", __FUNCTION__, port);
		return -1;
	}

	reg = get_phy_reg(lynx, 7);
	if (reg != -1) {
		reg &= 0xf0;
		reg |= port;
		set_phy_reg(lynx, 7, reg);
		return 0;
	} else {
		return -1;
	}
}
#endif

static u32 get_phy_vendorid(struct ti_lynx *lynx)
{
	u32 pvid = 0;
	sel_phy_reg_page(lynx, 1);
	pvid |= (get_phy_reg(lynx, 10) << 16);
	pvid |= (get_phy_reg(lynx, 11) << 8);
	pvid |= get_phy_reg(lynx, 12);
	PRINT(KERN_INFO, lynx->id, "PHY vendor id 0x%06x", pvid);
	return pvid;
}

static u32 get_phy_productid(struct ti_lynx *lynx)
{
	u32 id = 0;
	sel_phy_reg_page(lynx, 1);
	id |= (get_phy_reg(lynx, 13) << 16);
	id |= (get_phy_reg(lynx, 14) << 8);
	id |= get_phy_reg(lynx, 15);
	PRINT(KERN_INFO, lynx->id, "PHY product id 0x%06x", id);
	return id;
}
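
/*
 * With an old (pre-1394a) PHY the receive buffer does not contain the local
 * node's own self-ID packet, so generate_own_selfid() builds an equivalent
 * quadlet from PHY registers 0-6 (physical ID, gap count, speed, contender
 * and port status); handle_selfid() injects it at the position where it is
 * missing.
 */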

static quadlet_t generate_own_selfid(struct ti_lynx *lynx,
				     struct hpsb_host *host)
{
	quadlet_t lsid;
	char phyreg[7];
	int i;

	phyreg[0] = lynx->phy_reg0;
	for (i = 1; i < 7; i++) {
		phyreg[i] = get_phy_reg(lynx, i);
	}

	/* FIXME? We assume a TSB21LV03A phy here.  This code doesn't support
	   more than 3 ports on the PHY anyway. */

	lsid = 0x80400000 | ((phyreg[0] & 0xfc) << 22);
	lsid |= (phyreg[1] & 0x3f) << 16; /* gap count */
	lsid |= (phyreg[2] & 0xc0) << 8; /* max speed */
	if (!hpsb_disable_irm)
		lsid |= (phyreg[6] & 0x01) << 11; /* contender (phy dependent) */
	/* lsid |= 1 << 11; *//* set contender (hack) */
	lsid |= (phyreg[6] & 0x10) >> 3; /* initiated reset */

	for (i = 0; i < (phyreg[2] & 0xf); i++) { /* ports */
		if (phyreg[3 + i] & 0x4) {
			lsid |= (((phyreg[3 + i] & 0x8) | 0x10) >> 3)
				<< (6 - i*2);
		} else {
			lsid |= 1 << (6 - i*2);
		}
	}

	cpu_to_be32s(&lsid);
	PRINT(KERN_DEBUG, lynx->id, "generated own selfid 0x%x", lsid);
	return lsid;
}

static void handle_selfid(struct ti_lynx *lynx, struct hpsb_host *host)
{
	quadlet_t *q = lynx->rcv_page;
	int phyid, isroot, size;
	quadlet_t lsid = 0;
	int i;

	if (lynx->phy_reg0 == -1 || lynx->selfid_size == -1) return;

	size = lynx->selfid_size;
	phyid = lynx->phy_reg0;

	i = (size > 16 ? 16 : size) / 4 - 1;
	while (i >= 0) {
		cpu_to_be32s(&q[i]);
		i--;
	}

	if (!lynx->phyic.reg_1394a) {
		lsid = generate_own_selfid(lynx, host);
	}

	isroot = (phyid & 2) != 0;
	phyid >>= 2;
	PRINT(KERN_INFO, lynx->id, "SelfID process finished (phyid %d, %s)",
	      phyid, (isroot ? "root" : "not root"));
	reg_write(lynx, LINK_ID, (0xffc0 | phyid) << 16);

	if (!lynx->phyic.reg_1394a && !size) {
		hpsb_selfid_received(host, lsid);
	}

	while (size > 0) {
		struct selfid *sid = (struct selfid *)q;

		if (!lynx->phyic.reg_1394a && !sid->extended
		    && (sid->phy_id == (phyid + 1))) {
			hpsb_selfid_received(host, lsid);
		}

		if (q[0] == ~q[1]) {
			PRINT(KERN_DEBUG, lynx->id, "SelfID packet 0x%x rcvd",
			      q[0]);
			hpsb_selfid_received(host, q[0]);
		} else {
			PRINT(KERN_INFO, lynx->id,
			      "inconsistent selfid 0x%x/0x%x", q[0], q[1]);
		}
		q += 2;
		size -= 8;
	}

	if (!lynx->phyic.reg_1394a && isroot && phyid != 0) {
		hpsb_selfid_received(host, lsid);
	}

	hpsb_selfid_complete(host, phyid, isroot);

	if (host->in_bus_reset) return; /* in bus reset again */

	if (isroot) reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_CYCMASTER); //FIXME: I do not think, we need this here
	reg_set_bits(lynx, LINK_CONTROL,
		     LINK_CONTROL_RCV_CMP_VALID | LINK_CONTROL_TX_ASYNC_EN
		     | LINK_CONTROL_RX_ASYNC_EN | LINK_CONTROL_CYCTIMEREN);
}
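
/*
 * Transmit path: lynx_transmit() appends the packet to d->queue; send_next()
 * moves the head entry onto d->pcl_queue, DMA-maps header and payload and
 * builds a two-buffer transmit PCL, which run_pcl() hands to the send DMA
 * channel.  Completion is reported through the DMA-halt interrupt serviced
 * in lynx_irq_handler().
 */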

/* This must be called with the respective queue_lock held. */
static void send_next(struct ti_lynx *lynx, int what)
{
	struct ti_pcl pcl;
	struct lynx_send_data *d;
	struct hpsb_packet *packet;

	d = (what == hpsb_iso ? &lynx->iso_send : &lynx->async);
	if (!list_empty(&d->pcl_queue)) {
		PRINT(KERN_ERR, lynx->id, "trying to queue a new packet in nonempty fifo");
		BUG();
	}

	packet = driver_packet(d->queue.next);
	list_move_tail(&packet->driver_list, &d->pcl_queue);

	d->header_dma = pci_map_single(lynx->dev, packet->header,
				       packet->header_size, PCI_DMA_TODEVICE);
	if (packet->data_size) {
		d->data_dma = pci_map_single(lynx->dev, packet->data,
					     packet->data_size,
					     PCI_DMA_TODEVICE);
	} else {
		d->data_dma = 0;
	}

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.pcl_status = 0;
	pcl.buffer[0].control = packet->speed_code << 14 | packet->header_size;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[0].pointer = d->header_dma;
	pcl.buffer[1].control = PCL_LAST_BUFF | packet->data_size;
	pcl.buffer[1].pointer = d->data_dma;

	switch (packet->type) {
	case hpsb_async:
		pcl.buffer[0].control |= PCL_CMD_XMT;
		break;
	case hpsb_iso:
		pcl.buffer[0].control |= PCL_CMD_XMT | PCL_ISOMODE;
		break;
	case hpsb_raw:
		pcl.buffer[0].control |= PCL_CMD_UNFXMT;
		break;
	}

	put_pcl(lynx, d->pcl, &pcl);
	run_pcl(lynx, d->pcl_start, d->channel);
}

/* called from subsystem core */
static int lynx_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_lynx *lynx = host->hostdata;
	struct lynx_send_data *d;
	unsigned long flags;

	if (packet->data_size >= 4096) {
		PRINT(KERN_ERR, lynx->id, "transmit packet data too big (%Zd)",
		      packet->data_size);
		return -EOVERFLOW;
	}

	switch (packet->type) {
	case hpsb_async:
	case hpsb_raw:
		d = &lynx->async;
		break;
	case hpsb_iso:
		d = &lynx->iso_send;
		break;
	default:
		PRINT(KERN_ERR, lynx->id, "invalid packet type %d",
		      packet->type);
		return -EINVAL;
	}

	if (packet->tcode == TCODE_WRITEQ
	    || packet->tcode == TCODE_READQ_RESPONSE) {
		cpu_to_be32s(&packet->header[3]);
	}

	spin_lock_irqsave(&d->queue_lock, flags);

	list_add_tail(&packet->driver_list, &d->queue);
	if (list_empty(&d->pcl_queue))
		send_next(lynx, packet->type);

	spin_unlock_irqrestore(&d->queue_lock, flags);

	return 0;
}
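
/*
 * lynx_devctl() implements the host controller hooks.  For RESET_BUS, a
 * 1394a PHY gets a short bus reset via the ISBR bit in PHY register 5;
 * older PHYs fall through to a long reset via the IBR bit in register 1,
 * with RHB set or cleared according to the force_root variant requested.
 */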

/* called from subsystem core */
static int lynx_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_lynx *lynx = host->hostdata;
	int retval = 0;
	struct hpsb_packet *packet;
	LIST_HEAD(packet_list);
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		if (reg_read(lynx, LINK_INT_STATUS) & LINK_INT_PHY_BUSRESET) {
			retval = 0;
			break;
		}

		switch (arg) {
		case SHORT_RESET:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;
				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				if (phy_reg & 0x80) {
					phy_reg &= ~0x80;
					set_phy_reg(lynx, 1, phy_reg); /* clear RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, no force_root) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;
				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg &= ~0x80;
			phy_reg |= 0x40;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, no force_root) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			set_phy_reg(lynx, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			if (lynx->phyic.reg_1394a) {
				phy_reg = get_phy_reg(lynx, 1);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				if (!(phy_reg & 0x80)) {
					phy_reg |= 0x80;
					set_phy_reg(lynx, 1, phy_reg); /* set RHB */
				}

				phy_reg = get_phy_reg(lynx, 5);
				if (phy_reg == -1) {
					PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
					retval = -1;
					break;
				}
				phy_reg |= 0x40;

				PRINT(KERN_INFO, lynx->id, "resetting bus (short bus reset, force_root set) on request");

				lynx->selfid_size = -1;
				lynx->phy_reg0 = -1;
				set_phy_reg(lynx, 5, phy_reg); /* set ISBR */
				break;
			} else {
				PRINT(KERN_INFO, lynx->id, "cannot do short bus reset, because of old phy");
				/* fall through to long bus reset */
			}
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(lynx, 1);
			if (phy_reg == -1) {
				PRINT(KERN_ERR, lynx->id, "cannot reset bus, because read phy reg failed");
				retval = -1;
				break;
			}
			phy_reg |= 0xc0;

			PRINT(KERN_INFO, lynx->id, "resetting bus (long bus reset, force_root set) on request");

			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			set_phy_reg(lynx, 1, phy_reg); /* set IBR and RHB */
			break;
		default:
			PRINT(KERN_ERR, lynx->id, "unknown argument for reset_bus command %d", arg);
			retval = -1;
		}

		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(lynx, CYCLE_TIMER);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(lynx, CYCLE_TIMER, arg);
		break;

	case SET_BUS_ID:
		reg_write(lynx, LINK_ID,
			  (arg << 22) | (reg_read(lynx, LINK_ID) & 0x003f0000));
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			reg_set_bits(lynx, LINK_CONTROL,
				     LINK_CONTROL_CYCMASTER);
		} else {
			reg_clear_bits(lynx, LINK_CONTROL,
				       LINK_CONTROL_CYCMASTER);
		}
		break;

	case CANCEL_REQUESTS:
		spin_lock_irqsave(&lynx->async.queue_lock, flags);

		reg_write(lynx, DMA_CHAN_CTRL(CHANNEL_ASYNC_SEND), 0);
		list_splice(&lynx->async.queue, &packet_list);
		INIT_LIST_HEAD(&lynx->async.queue);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);
			PRINTD(KERN_DEBUG, lynx->id, "no async packet in PCL to cancel");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			PRINT(KERN_INFO, lynx->id, "cancelling async packet, that was already in PCL");

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			spin_unlock_irqrestore(&lynx->async.queue_lock, flags);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_ABORTED;
			}
			hpsb_packet_sent(host, packet, ack);
		}

		while (!list_empty(&packet_list)) {
			packet = driver_packet(packet_list.next);
			list_del_init(&packet->driver_list);
			hpsb_packet_sent(host, packet, ACKX_ABORTED);
		}

		break;

	case ISO_LISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		if (lynx->iso_rcv.chan_count++ == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  DMA_WORD1_CMP_ENABLE_MASTER);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;

	case ISO_UNLISTEN_CHANNEL:
		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

		if (--lynx->iso_rcv.chan_count == 0) {
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
				  0);
		}

		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
		break;

	default:
		PRINT(KERN_ERR, lynx->id, "unknown devctl command %d", cmd);
		retval = -1;
	}

	return retval;
}


/***************************************
 * IEEE-1394 functionality section END *
 ***************************************/


/********************************************************
 * Global stuff (interrupt handler, init/shutdown code) *
 ********************************************************/
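
/*
 * Interrupt handler: the IRQ line is shared, so first check
 * PCI_INT_INT_PEND and return IRQ_NONE if the card did not interrupt.  Both
 * status registers are acknowledged by writing them back, then link events
 * (bus reset, PHY register data, FIFO errors) and the per-channel DMA-halt
 * conditions are serviced.
 */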

static irqreturn_t lynx_irq_handler(int irq, void *dev_id,
				    struct pt_regs *regs_are_unused)
{
	struct ti_lynx *lynx = (struct ti_lynx *)dev_id;
	struct hpsb_host *host = lynx->host;
	u32 intmask;
	u32 linkint;

	linkint = reg_read(lynx, LINK_INT_STATUS);
	intmask = reg_read(lynx, PCI_INT_STATUS);

	if (!(intmask & PCI_INT_INT_PEND))
		return IRQ_NONE;

	PRINTD(KERN_DEBUG, lynx->id, "interrupt: 0x%08x / 0x%08x", intmask,
	       linkint);

	reg_write(lynx, LINK_INT_STATUS, linkint);
	reg_write(lynx, PCI_INT_STATUS, intmask);

	if (intmask & PCI_INT_1394) {
		if (linkint & LINK_INT_PHY_TIMEOUT) {
			PRINT(KERN_INFO, lynx->id, "PHY timeout occurred");
		}
		if (linkint & LINK_INT_PHY_BUSRESET) {
			PRINT(KERN_INFO, lynx->id, "bus reset interrupt");
			lynx->selfid_size = -1;
			lynx->phy_reg0 = -1;
			if (!host->in_bus_reset)
				hpsb_bus_reset(host);
		}
		if (linkint & LINK_INT_PHY_REG_RCVD) {
			u32 reg;

			spin_lock(&lynx->phy_reg_lock);
			reg = reg_read(lynx, LINK_PHY);
			spin_unlock(&lynx->phy_reg_lock);

			if (!host->in_bus_reset) {
				PRINT(KERN_INFO, lynx->id,
				      "phy reg received without reset");
			} else if (reg & 0xf00) {
				PRINT(KERN_INFO, lynx->id,
				      "unsolicited phy reg %d received",
				      (reg >> 8) & 0xf);
			} else {
				lynx->phy_reg0 = reg & 0xff;
				handle_selfid(lynx, host);
			}
		}
		if (linkint & LINK_INT_ISO_STUCK) {
			PRINT(KERN_INFO, lynx->id, "isochronous transmitter stuck");
		}
		if (linkint & LINK_INT_ASYNC_STUCK) {
			PRINT(KERN_INFO, lynx->id, "asynchronous transmitter stuck");
		}
		if (linkint & LINK_INT_SENT_REJECT) {
			PRINT(KERN_INFO, lynx->id, "sent reject");
		}
		if (linkint & LINK_INT_TX_INVALID_TC) {
			PRINT(KERN_INFO, lynx->id, "invalid transaction code");
		}
		if (linkint & LINK_INT_GRF_OVERFLOW) {
			/* flush FIFO if overflow happens during reset */
			if (host->in_bus_reset)
				reg_write(lynx, FIFO_CONTROL,
					  FIFO_CONTROL_GRF_FLUSH);
			PRINT(KERN_INFO, lynx->id, "GRF overflow");
		}
		if (linkint & LINK_INT_ITF_UNDERFLOW) {
			PRINT(KERN_INFO, lynx->id, "ITF underflow");
		}
		if (linkint & LINK_INT_ATF_UNDERFLOW) {
			PRINT(KERN_INFO, lynx->id, "ATF underflow");
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_RCV)) {
		PRINTD(KERN_DEBUG, lynx->id, "iso receive");

		spin_lock(&lynx->iso_rcv.lock);

		lynx->iso_rcv.stat[lynx->iso_rcv.next] =
			reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ISO_RCV));

		lynx->iso_rcv.used++;
		lynx->iso_rcv.next = (lynx->iso_rcv.next + 1) % NUM_ISORCV_PCL;

		if ((lynx->iso_rcv.next == lynx->iso_rcv.last)
		    || !lynx->iso_rcv.chan_count) {
			PRINTD(KERN_DEBUG, lynx->id, "stopped");
			reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);
		}

		run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, lynx->iso_rcv.next,
			    CHANNEL_ISO_RCV);

		spin_unlock(&lynx->iso_rcv.lock);

		tasklet_schedule(&lynx->iso_rcv.tq);
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_SEND)) {
		PRINTD(KERN_DEBUG, lynx->id, "async sent");
		spin_lock(&lynx->async.queue_lock);

		if (list_empty(&lynx->async.pcl_queue)) {
			spin_unlock(&lynx->async.queue_lock);
			PRINT(KERN_WARNING, lynx->id, "async dma halted, but no queued packet (maybe it was cancelled)");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			get_pcl(lynx, lynx->async.pcl, &pcl);

			packet = driver_packet(lynx->async.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->async.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->async.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			if (!list_empty(&lynx->async.queue)) {
				send_next(lynx, hpsb_async);
			}

			spin_unlock(&lynx->async.queue_lock);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "async packet was not completed");
				ack = ACKX_SEND_ERROR;
			}
			hpsb_packet_sent(host, packet, ack);
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ISO_SEND)) {
		PRINTD(KERN_DEBUG, lynx->id, "iso sent");
		spin_lock(&lynx->iso_send.queue_lock);

		if (list_empty(&lynx->iso_send.pcl_queue)) {
			spin_unlock(&lynx->iso_send.queue_lock);
			PRINT(KERN_ERR, lynx->id, "iso send dma halted, but no queued packet");
		} else {
			struct ti_pcl pcl;
			u32 ack;
			struct hpsb_packet *packet;

			get_pcl(lynx, lynx->iso_send.pcl, &pcl);

			packet = driver_packet(lynx->iso_send.pcl_queue.next);
			list_del_init(&packet->driver_list);

			pci_unmap_single(lynx->dev, lynx->iso_send.header_dma,
					 packet->header_size, PCI_DMA_TODEVICE);
			if (packet->data_size) {
				pci_unmap_single(lynx->dev, lynx->iso_send.data_dma,
						 packet->data_size, PCI_DMA_TODEVICE);
			}

			if (!list_empty(&lynx->iso_send.queue)) {
				send_next(lynx, hpsb_iso);
			}

			spin_unlock(&lynx->iso_send.queue_lock);

			if (pcl.pcl_status & DMA_CHAN_STAT_PKTCMPL) {
				if (pcl.pcl_status & DMA_CHAN_STAT_SPECIALACK) {
					ack = (pcl.pcl_status >> 15) & 0xf;
					PRINTD(KERN_INFO, lynx->id, "special ack %d", ack);
					ack = (ack == 1 ? ACKX_TIMEOUT : ACKX_SEND_ERROR);
				} else {
					ack = (pcl.pcl_status >> 15) & 0xf;
				}
			} else {
				PRINT(KERN_INFO, lynx->id, "iso send packet was not completed");
				ack = ACKX_SEND_ERROR;
			}

			hpsb_packet_sent(host, packet, ack); //FIXME: maybe we should just use ACK_COMPLETE and ACKX_SEND_ERROR
		}
	}

	if (intmask & PCI_INT_DMA_HLT(CHANNEL_ASYNC_RCV)) {
		/* general receive DMA completed */
		int stat = reg_read(lynx, DMA_CHAN_STAT(CHANNEL_ASYNC_RCV));

		PRINTD(KERN_DEBUG, lynx->id, "received packet size %d",
		       stat & 0x1fff);

		if (stat & DMA_CHAN_STAT_SELFID) {
			lynx->selfid_size = stat & 0x1fff;
			handle_selfid(lynx, host);
		} else {
			quadlet_t *q_data = lynx->rcv_page;
			if ((*q_data >> 4 & 0xf) == TCODE_READQ_RESPONSE
			    || (*q_data >> 4 & 0xf) == TCODE_WRITEQ) {
				cpu_to_be32s(q_data + 3);
			}
			hpsb_packet_received(host, q_data, stat & 0x1fff, 0);
		}

		run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);
	}

	return IRQ_HANDLED;
}


static void iso_rcv_bh(struct ti_lynx *lynx)
{
	unsigned int idx;
	quadlet_t *data;
	unsigned long flags;

	spin_lock_irqsave(&lynx->iso_rcv.lock, flags);

	while (lynx->iso_rcv.used) {
		idx = lynx->iso_rcv.last;
		spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);

		data = lynx->iso_rcv.page[idx / ISORCV_PER_PAGE]
			+ (idx % ISORCV_PER_PAGE) * MAX_ISORCV_SIZE;

		if ((*data >> 16) + 4 != (lynx->iso_rcv.stat[idx] & 0x1fff)) {
			PRINT(KERN_ERR, lynx->id,
			      "iso length mismatch 0x%08x/0x%08x", *data,
			      lynx->iso_rcv.stat[idx]);
		}

		if (lynx->iso_rcv.stat[idx]
		    & (DMA_CHAN_STAT_PCIERR | DMA_CHAN_STAT_PKTERR)) {
			PRINT(KERN_INFO, lynx->id,
			      "iso receive error on %d to 0x%p", idx, data);
		} else {
			hpsb_packet_received(lynx->host, data,
					     lynx->iso_rcv.stat[idx] & 0x1fff,
					     0);
		}

		spin_lock_irqsave(&lynx->iso_rcv.lock, flags);
		lynx->iso_rcv.last = (idx + 1) % NUM_ISORCV_PCL;
		lynx->iso_rcv.used--;
	}

	if (lynx->iso_rcv.chan_count) {
		reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV),
			  DMA_WORD1_CMP_ENABLE_MASTER);
	}
	spin_unlock_irqrestore(&lynx->iso_rcv.lock, flags);
}
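
/*
 * Teardown mirrors add_card(): the lynx->state enum records how far
 * initialization got, and the switch below falls through from the most
 * advanced state down to 'clear', releasing only what was actually set up.
 */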

static void remove_card(struct pci_dev *dev)
{
	struct ti_lynx *lynx;
	struct device *lynx_dev;
	int i;

	lynx = pci_get_drvdata(dev);
	if (!lynx) return;
	pci_set_drvdata(dev, NULL);

	lynx_dev = get_device(&lynx->host->device);

	switch (lynx->state) {
	case is_host:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		hpsb_remove_host(lynx->host);
	case have_intr:
		reg_write(lynx, PCI_INT_ENABLE, 0);
		free_irq(lynx->dev->irq, lynx);

		/* Disable IRM Contender and LCtrl */
		if (lynx->phyic.reg_1394a)
			set_phy_reg(lynx, 4, ~0xc0 & get_phy_reg(lynx, 4));

		/* Let all other nodes know to ignore us */
		lynx_devctl(lynx->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);

	case have_iomappings:
		reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
		/* Fix buggy cards with autoboot pin not tied low: */
		reg_write(lynx, DMA0_CHAN_CTRL, 0);
		iounmap(lynx->registers);
		iounmap(lynx->local_rom);
		iounmap(lynx->local_ram);
		iounmap(lynx->aux_port);
	case have_1394_buffers:
		for (i = 0; i < ISORCV_PAGES; i++) {
			if (lynx->iso_rcv.page[i]) {
				pci_free_consistent(lynx->dev, PAGE_SIZE,
						    lynx->iso_rcv.page[i],
						    lynx->iso_rcv.page_dma[i]);
			}
		}
		pci_free_consistent(lynx->dev, PAGE_SIZE, lynx->rcv_page,
				    lynx->rcv_page_dma);
	case have_aux_buf:
	case have_pcl_mem:
		pci_free_consistent(lynx->dev, LOCALRAM_SIZE, lynx->pcl_mem,
				    lynx->pcl_mem_dma);
	case clear:
		/* do nothing - already freed */
		break;
	}

	tasklet_kill(&lynx->iso_rcv.tq);

	if (lynx_dev)
		put_device(lynx_dev);
}
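
/*
 * add_card() initializes in stages, advancing lynx->state after each one.
 * The FAIL() macro prints the message, calls remove_card() (which uses that
 * state to unwind whatever was already set up) and returns 'error'.
 */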

static int __devinit add_card(struct pci_dev *dev,
			      const struct pci_device_id *devid_is_unused)
{
#define FAIL(fmt, args...) do { \
	PRINT_G(KERN_ERR, fmt , ## args); \
	remove_card(dev); \
	return error; \
	} while (0)

	char irq_buf[16];
	struct hpsb_host *host;
	struct ti_lynx *lynx; /* shortcut to currently handled device */
	struct ti_pcl pcl;
	u32 *pcli;
	int i;
	int error;

	error = -ENXIO;

	if (pci_set_dma_mask(dev, DMA_32BIT_MASK))
		FAIL("DMA address limits not supported for PCILynx hardware");
	if (pci_enable_device(dev))
		FAIL("failed to enable PCILynx hardware");
	pci_set_master(dev);

	error = -ENOMEM;

	host = hpsb_alloc_host(&lynx_driver, sizeof(struct ti_lynx), &dev->dev);
	if (!host) FAIL("failed to allocate control structure memory");

	lynx = host->hostdata;
	lynx->id = card_id++;
	lynx->dev = dev;
	lynx->state = clear;
	lynx->host = host;
	host->pdev = dev;
	pci_set_drvdata(dev, lynx);

	spin_lock_init(&lynx->lock);
	spin_lock_init(&lynx->phy_reg_lock);

	lynx->pcl_mem = pci_alloc_consistent(dev, LOCALRAM_SIZE,
					     &lynx->pcl_mem_dma);

	if (lynx->pcl_mem != NULL) {
		lynx->state = have_pcl_mem;
		PRINT(KERN_INFO, lynx->id,
		      "allocated PCL memory %d Bytes @ 0x%p", LOCALRAM_SIZE,
		      lynx->pcl_mem);
	} else {
		FAIL("failed to allocate PCL memory area");
	}

	lynx->rcv_page = pci_alloc_consistent(dev, PAGE_SIZE,
					      &lynx->rcv_page_dma);
	if (lynx->rcv_page == NULL) {
		FAIL("failed to allocate receive buffer");
	}
	lynx->state = have_1394_buffers;

	for (i = 0; i < ISORCV_PAGES; i++) {
		lynx->iso_rcv.page[i] =
			pci_alloc_consistent(dev, PAGE_SIZE,
					     &lynx->iso_rcv.page_dma[i]);
		if (lynx->iso_rcv.page[i] == NULL) {
			FAIL("failed to allocate iso receive buffers");
		}
	}

	lynx->registers = ioremap_nocache(pci_resource_start(dev,0),
					  PCILYNX_MAX_REGISTER);
	lynx->local_ram = ioremap(pci_resource_start(dev,1), PCILYNX_MAX_MEMORY);
	lynx->aux_port = ioremap(pci_resource_start(dev,2), PCILYNX_MAX_MEMORY);
	lynx->local_rom = ioremap(pci_resource_start(dev,PCI_ROM_RESOURCE),
				  PCILYNX_MAX_MEMORY);
	lynx->state = have_iomappings;

	if (lynx->registers == NULL) {
		FAIL("failed to remap registers - card not accessible");
	}

	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
	/* Fix buggy cards with autoboot pin not tied low: */
	reg_write(lynx, DMA0_CHAN_CTRL, 0);

#ifndef __sparc__
	sprintf (irq_buf, "%d", dev->irq);
#else
	sprintf (irq_buf, "%s", __irq_itoa(dev->irq));
#endif

	if (!request_irq(dev->irq, lynx_irq_handler, SA_SHIRQ,
			 PCILYNX_DRIVER_NAME, lynx)) {
		PRINT(KERN_INFO, lynx->id, "allocated interrupt %s", irq_buf);
		lynx->state = have_intr;
	} else {
		FAIL("failed to allocate shared interrupt %s", irq_buf);
	}

	/* alloc_pcl return values are not checked, it is expected that the
	 * provided PCL space is sufficient for the initial allocations */
	lynx->rcv_pcl = alloc_pcl(lynx);
	lynx->rcv_pcl_start = alloc_pcl(lynx);
	lynx->async.pcl = alloc_pcl(lynx);
	lynx->async.pcl_start = alloc_pcl(lynx);
	lynx->iso_send.pcl = alloc_pcl(lynx);
	lynx->iso_send.pcl_start = alloc_pcl(lynx);

	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		lynx->iso_rcv.pcl[i] = alloc_pcl(lynx);
	}
	lynx->iso_rcv.pcl_start = alloc_pcl(lynx);

	/* all allocations successful - simple init stuff follows */

	reg_write(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

	tasklet_init(&lynx->iso_rcv.tq, (void (*)(unsigned long))iso_rcv_bh,
		     (unsigned long)lynx);

	spin_lock_init(&lynx->iso_rcv.lock);

	spin_lock_init(&lynx->async.queue_lock);
	lynx->async.channel = CHANNEL_ASYNC_SEND;
	spin_lock_init(&lynx->iso_send.queue_lock);
	lynx->iso_send.channel = CHANNEL_ISO_SEND;

	PRINT(KERN_INFO, lynx->id, "remapped memory spaces reg 0x%p, rom 0x%p, "
	      "ram 0x%p, aux 0x%p", lynx->registers, lynx->local_rom,
	      lynx->local_ram, lynx->aux_port);

	/* now, looking for PHY register set */
	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
		lynx->phyic.reg_1394a = 1;
		PRINT(KERN_INFO, lynx->id,
		      "found 1394a conform PHY (using extended register set)");
		lynx->phyic.vendor = get_phy_vendorid(lynx);
		lynx->phyic.product = get_phy_productid(lynx);
	} else {
		lynx->phyic.reg_1394a = 0;
		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
	}

	lynx->selfid_size = -1;
	lynx->phy_reg0 = -1;

	INIT_LIST_HEAD(&lynx->async.queue);
	INIT_LIST_HEAD(&lynx->async.pcl_queue);
	INIT_LIST_HEAD(&lynx->iso_send.queue);
	INIT_LIST_HEAD(&lynx->iso_send.pcl_queue);

	pcl.next = pcl_bus(lynx, lynx->rcv_pcl);
	put_pcl(lynx, lynx->rcv_pcl_start, &pcl);

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;

	pcl.buffer[0].control = PCL_CMD_RCV | 16;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[1].control = PCL_LAST_BUFF | 4080;

	pcl.buffer[0].pointer = lynx->rcv_page_dma;
	pcl.buffer[1].pointer = lynx->rcv_page_dma + 16;
	put_pcl(lynx, lynx->rcv_pcl, &pcl);

	pcl.next = pcl_bus(lynx, lynx->async.pcl);
	pcl.async_error_next = pcl_bus(lynx, lynx->async.pcl);
	put_pcl(lynx, lynx->async.pcl_start, &pcl);

	pcl.next = pcl_bus(lynx, lynx->iso_send.pcl);
	pcl.async_error_next = PCL_NEXT_INVALID;
	put_pcl(lynx, lynx->iso_send.pcl_start, &pcl);

	pcl.next = PCL_NEXT_INVALID;
	pcl.async_error_next = PCL_NEXT_INVALID;
	pcl.buffer[0].control = PCL_CMD_RCV | 4;
#ifndef __BIG_ENDIAN
	pcl.buffer[0].control |= PCL_BIGENDIAN;
#endif
	pcl.buffer[1].control = PCL_LAST_BUFF | 2044;

	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		int page = i / ISORCV_PER_PAGE;
		int sec = i % ISORCV_PER_PAGE;

		pcl.buffer[0].pointer = lynx->iso_rcv.page_dma[page]
			+ sec * MAX_ISORCV_SIZE;
		pcl.buffer[1].pointer = pcl.buffer[0].pointer + 4;
		put_pcl(lynx, lynx->iso_rcv.pcl[i], &pcl);
	}

	pcli = (u32 *)&pcl;
	for (i = 0; i < NUM_ISORCV_PCL; i++) {
		pcli[i] = pcl_bus(lynx, lynx->iso_rcv.pcl[i]);
	}
	put_pcl(lynx, lynx->iso_rcv.pcl_start, &pcl);

	/* FIFO sizes from left to right: ITF=48 ATF=48 GRF=160 */
	reg_write(lynx, FIFO_SIZES, 0x003030a0);
	/* 20 byte threshold before triggering PCI transfer */
	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x2<<24);
	/* threshold on both send FIFOs before transmitting:
	   FIFO size - cache line size - 1 */
	i = reg_read(lynx, PCI_LATENCY_CACHELINE) & 0xff;
	i = 0x30 - i - 1;
	reg_write(lynx, FIFO_XMIT_THRESHOLD, (i << 8) | i);

	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_1394);

	reg_write(lynx, LINK_INT_ENABLE, LINK_INT_PHY_TIMEOUT
		  | LINK_INT_PHY_REG_RCVD | LINK_INT_PHY_BUSRESET
		  | LINK_INT_ISO_STUCK | LINK_INT_ASYNC_STUCK
		  | LINK_INT_SENT_REJECT | LINK_INT_TX_INVALID_TC
		  | LINK_INT_GRF_OVERFLOW | LINK_INT_ITF_UNDERFLOW
		  | LINK_INT_ATF_UNDERFLOW);

	reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
	reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ASYNC_RCV), 0xa<<4);
	reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ASYNC_RCV), 0);
	reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ASYNC_RCV),
		  DMA_WORD1_CMP_MATCH_LOCAL_NODE | DMA_WORD1_CMP_MATCH_BROADCAST
		  | DMA_WORD1_CMP_MATCH_EXACT | DMA_WORD1_CMP_MATCH_BUS_BCAST
		  | DMA_WORD1_CMP_ENABLE_SELF_ID | DMA_WORD1_CMP_ENABLE_MASTER);

	run_pcl(lynx, lynx->rcv_pcl_start, CHANNEL_ASYNC_RCV);

	reg_write(lynx, DMA_WORD0_CMP_VALUE(CHANNEL_ISO_RCV), 0);
	reg_write(lynx, DMA_WORD0_CMP_ENABLE(CHANNEL_ISO_RCV), 0x9<<4);
	reg_write(lynx, DMA_WORD1_CMP_VALUE(CHANNEL_ISO_RCV), 0);
	reg_write(lynx, DMA_WORD1_CMP_ENABLE(CHANNEL_ISO_RCV), 0);

	run_sub_pcl(lynx, lynx->iso_rcv.pcl_start, 0, CHANNEL_ISO_RCV);

	reg_write(lynx, LINK_CONTROL, LINK_CONTROL_RCV_CMP_VALID
		  | LINK_CONTROL_TX_ISO_EN | LINK_CONTROL_RX_ISO_EN
		  | LINK_CONTROL_TX_ASYNC_EN | LINK_CONTROL_RX_ASYNC_EN
		  | LINK_CONTROL_RESET_TX | LINK_CONTROL_RESET_RX);

	if (!lynx->phyic.reg_1394a) {
		if (!hpsb_disable_irm) {
			/* attempt to enable contender bit -FIXME- would this
			 * work elsewhere? */
			reg_set_bits(lynx, GPIO_CTRL_A, 0x1);
			reg_write(lynx, GPIO_DATA_BASE + 0x3c, 0x1);
		}
	} else {
		/* set the contender (if appropriate) and LCtrl bit in the
		 * extended PHY register set. (Should check that PHY_02_EXTENDED
		 * is set in register 2?)
		 */
		i = get_phy_reg(lynx, 4);
		i |= PHY_04_LCTRL;
		if (hpsb_disable_irm)
			i &= ~PHY_04_CONTENDER;
		else
			i |= PHY_04_CONTENDER;
		if (i != -1) set_phy_reg(lynx, 4, i);
	}

	if (!skip_eeprom)
	{
		/* needed for i2c communication with serial eeprom */
		struct i2c_adapter *i2c_ad;
		struct i2c_algo_bit_data i2c_adapter_data;

		error = -ENOMEM;
		i2c_ad = kmalloc(sizeof(struct i2c_adapter), SLAB_KERNEL);
		if (!i2c_ad) FAIL("failed to allocate I2C adapter memory");

		memcpy(i2c_ad, &bit_ops, sizeof(struct i2c_adapter));
		i2c_adapter_data = bit_data;
		i2c_ad->algo_data = &i2c_adapter_data;
		i2c_adapter_data.data = lynx;

		PRINTD(KERN_DEBUG, lynx->id,"original eeprom control: %d",
		       reg_read(lynx, SERIAL_EEPROM_CONTROL));

		/* reset hardware to sane state */
		lynx->i2c_driven_state = 0x00000070;
		reg_write(lynx, SERIAL_EEPROM_CONTROL, lynx->i2c_driven_state);

		if (i2c_bit_add_bus(i2c_ad) < 0)
		{
			kfree(i2c_ad);
			error = -ENXIO;
			FAIL("unable to register i2c");
		}
		else
		{
			/* do i2c stuff */
			unsigned char i2c_cmd = 0x10;
			struct i2c_msg msg[2] = { { 0x50, 0, 1, &i2c_cmd },
						  { 0x50, I2C_M_RD, 20, (unsigned char*) lynx->bus_info_block }
						};

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
			union i2c_smbus_data data;

			if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE,NULL))
				PRINT(KERN_ERR, lynx->id,"eeprom read start has failed");
			else
			{
				u16 addr;
				for (addr=0x00; addr < 0x100; addr++) {
					if (i2c_smbus_xfer(i2c_ad, 80, 0, I2C_SMBUS_READ, 0, I2C_SMBUS_BYTE,& data)) {
						PRINT(KERN_ERR, lynx->id, "unable to read i2c %x", addr);
						break;
					}
					else
						PRINT(KERN_DEBUG, lynx->id,"got serial eeprom data at %x: %x",addr, data.byte);
				}
			}
#endif

			/* we use i2c_transfer, because i2c_smbus_read_block_data does not work properly and we
			   do it more efficiently in one transaction rather than using several reads */
			if (i2c_transfer(i2c_ad, msg, 2) < 0) {
				PRINT(KERN_ERR, lynx->id, "unable to read bus info block from i2c");
			} else {
				int i;

				PRINT(KERN_INFO, lynx->id, "got bus info block from serial eeprom");
				/* FIXME: probably we should rewrite the max_rec, max_ROM(1394a),
				 * generation(1394a) and link_spd(1394a) field and recalculate
				 * the CRC */

				for (i = 0; i < 5 ; i++)
					PRINTD(KERN_DEBUG, lynx->id, "Businfo block quadlet %i: %08x",
					       i, be32_to_cpu(lynx->bus_info_block[i]));

				/* info_length, crc_length and 1394 magic number to check, if it is really a bus info block */
				if (((be32_to_cpu(lynx->bus_info_block[0]) & 0xffff0000) == 0x04040000) &&
				    (lynx->bus_info_block[1] == __constant_cpu_to_be32(0x31333934)))
				{
					PRINT(KERN_DEBUG, lynx->id, "read a valid bus info block from");
				} else {
					kfree(i2c_ad);
					error = -ENXIO;
					FAIL("read something from serial eeprom, but it does not seem to be a valid bus info block");
				}

			}

			i2c_bit_del_bus(i2c_ad);
			kfree(i2c_ad);
		}
	}

	host->csr.guid_hi = be32_to_cpu(lynx->bus_info_block[3]);
	host->csr.guid_lo = be32_to_cpu(lynx->bus_info_block[4]);
	host->csr.cyc_clk_acc = (be32_to_cpu(lynx->bus_info_block[2]) >> 16) & 0xff;
	host->csr.max_rec = (be32_to_cpu(lynx->bus_info_block[2]) >> 12) & 0xf;
	if (!lynx->phyic.reg_1394a)
		host->csr.lnk_spd = (get_phy_reg(lynx, 2) & 0xc0) >> 6;
	else
		host->csr.lnk_spd = be32_to_cpu(lynx->bus_info_block[2]) & 0x7;

	if (hpsb_add_host(host)) {
		error = -ENOMEM;
		FAIL("Failed to register host with highlevel");
	}

	lynx->state = is_host;

	return 0;
#undef FAIL
}


static struct pci_device_id pci_table[] = {
	{
		.vendor =    PCI_VENDOR_ID_TI,
		.device =    PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }			/* Terminating entry */
};

static struct pci_driver lynx_pci_driver = {
	.name =     PCILYNX_DRIVER_NAME,
	.id_table = pci_table,
	.probe =    add_card,
	.remove =   remove_card,
};

static struct hpsb_host_driver lynx_driver = {
	.owner =	   THIS_MODULE,
	.name =		   PCILYNX_DRIVER_NAME,
	.set_hw_config_rom = NULL,
	.transmit_packet = lynx_transmit,
	.devctl =	   lynx_devctl,
	.isoctl =          NULL,
};

MODULE_AUTHOR("Andreas E. Bombe <andreas.bombe@munich.netsurf.de>");
MODULE_DESCRIPTION("driver for Texas Instruments PCI Lynx IEEE-1394 controller");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("pcilynx");
MODULE_DEVICE_TABLE(pci, pci_table);

static int __init pcilynx_init(void)
{
	int ret;

	ret = pci_register_driver(&lynx_pci_driver);
	if (ret < 0) {
		PRINT_G(KERN_ERR, "PCI module init failed");
		return ret;
	}

	return 0;
}

static void __exit pcilynx_cleanup(void)
{
	pci_unregister_driver(&lynx_pci_driver);
}


module_init(pcilynx_init);
module_exit(pcilynx_cleanup);