/*
 *	olympic.c (c) 1999 Peter De Schrijver All Rights Reserved
 *		   1999/2000 Mike Phillips (mikep@linuxtr.net)
 *
 *  Linux driver for IBM PCI tokenring cards based on the Pit/Pit-Phy/Olympic
 *  chipset.
 *
 *  Base Driver Skeleton:
 *	Written 1993-94 by Donald Becker.
 *
 *	Copyright 1993 United States Government as represented by the
 *	Director, National Security Agency.
 *
 *  Thanks to Erik De Cock, Adrian Bridgett and Frank Fiene for their
 *  assistance and perseverance with the testing of this driver.
 *
 *  This software may be used and distributed according to the terms
 *  of the GNU General Public License, incorporated herein by reference.
 *
 *  4/27/99 - Alpha Release 0.1.0
 *	      First release to the public
 *
 *  6/8/99  - Official Release 0.2.0
 *	      Merged into the kernel code
 *  8/18/99 - Updated driver for 2.3.13 kernel to use new pci
 *	      resource. Driver also reports the card name returned by
 *	      the pci resource.
 *  1/11/00 - Added spinlocks for smp
 *  2/23/00 - Updated to dev_kfree_irq
 *  3/10/00 - Fixed FDX enable which triggered other bugs also
 *	      squashed.
 *  5/20/00 - Changes to handle Olympic on LinuxPPC. Endian changes.
 *	      The odd thing about the changes is that the fix for
 *	      endian issues with the big-endian data in the arb, asb...
 *	      was to always swab() the bytes, no matter what CPU.
 *	      That's because the read[wl]() functions always swap the
 *	      bytes on the way in on PPC.
 *	      Fixing the hardware descriptors was another matter,
 *	      because they weren't going through read[wl](), there all
 *	      the results had to be in memory in le32 values. kdaaker
 *
 * 12/23/00 - Added minimal Cardbus support (Thanks Donald).
 *
 * 03/09/01 - Add new pci api, dev_base_lock, general clean up.
 *
 * 03/27/01 - Add new dma pci (Thanks to Kyle Lucke) and alloc_trdev
 *	      Change proc_fs behaviour, now one entry per adapter.
 *
 * 04/09/01 - Couple of bug fixes to the dma unmaps and ejecting the
 *	      adapter when live does not take the system down with it.
 *
 * 06/02/01 - Clean up, copy skb for small packets
 *
 * 06/22/01 - Add EISR error handling routines
 *
 * 07/19/01 - Improve bad LAA reporting, strip out freemem
 *	      into a separate function, it's called from 3
 *	      different places now.
 * 02/09/02 - Replaced sleep_on.
 * 03/01/02 - Replace access to several registers from 32 bit to
 *	      16 bit. Fixes alignment errors on PPC 64 bit machines.
 *	      Thanks to Al Trautman for this one.
 * 03/10/02 - Fix BUG in arb_cmd. Bug was there all along but was
 *	      silently ignored until the error checking code
 *	      went into version 1.0.0
 * 06/04/02 - Add correct start up sequence for the cardbus adapters.
 *	      Required for strict compliance with pci power mgmt specs.
 *  To Do:
 *
 *	     Wake on lan
 *
 *  If Problems do Occur
 *  Most problems can be rectified by either closing and opening the interface
 *  (ifconfig down and up) or rmmod and insmod'ing the driver (a bit difficult
 *  if compiled into the kernel).
 */
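/* For example (hypothetical interface name tr0):
 *
 *	ifconfig tr0 down ; ifconfig tr0 up
 * or, when built as a module:
 *	rmmod olympic ; modprobe olympic
 */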
/* Change OLYMPIC_DEBUG to 1 to get verbose, and I mean really verbose, messages */

#define OLYMPIC_DEBUG 0

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/trdevice.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>

#include <net/checksum.h>
#include <net/net_namespace.h>

#include <asm/io.h>

#include "olympic.h"
/* I've got to put some intelligence into the version number so that Peter and I know
 * which version of the code somebody has got.
 * Version Number = a.b.c.d  where a.b.c is the level of code and d is the latest author.
 * So 0.0.1.pds = Peter, 0.0.1.mlp = Mike
 *
 * Official releases will only have an a.b.c version number format.
 */

static char version[] =
"Olympic.c v1.0.5 6/04/02 - Peter De Schrijver & Mike Phillips";
static char *open_maj_error[] = {"No error", "Lobe Media Test", "Physical Insertion",
				 "Address Verification", "Neighbor Notification (Ring Poll)",
				 "Request Parameters", "FDX Registration Request",
				 "FDX Duplicate Address Check", "Station registration Query Wait",
				 "Unknown stage"};

static char *open_min_error[] = {"No error", "Function Failure", "Signal Lost", "Wire Fault",
				 "Ring Speed Mismatch", "Timeout", "Ring Failure", "Ring Beaconing",
				 "Duplicate Node Address", "Request Parameters", "Remove Received",
				 "Reserved", "Reserved", "No Monitor Detected for RPL",
				 "Monitor Contention failure for RPL", "FDX Protocol Error"};
/* Module parameters */

MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>");
MODULE_DESCRIPTION("Olympic PCI/Cardbus Chipset Driver");

/* Ring Speed 0,4,16,100
 * 0 = Autosense
 * 4,16 = Selected speed only, no autosense
 * This allows the card to be the first on the ring
 * and become the active monitor.
 * 100 = Nothing at present, 100mbps is autodetected
 * if FDX is turned on. May be implemented in the future to
 * fail if 100mbps is not detected.
 *
 * WARNING: Some hubs will allow you to insert
 * at the wrong speed
 */

static int ringspeed[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(ringspeed, int, NULL, 0);

/* Packet buffer size */

static int pkt_buf_sz[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(pkt_buf_sz, int, NULL, 0);

/* Message Level */

static int message_level[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(message_level, int, NULL, 0);

/* Change network_monitor to receive mac frames through the arb channel.
 * Will also create a /proc/net/olympic_tr%d entry, where %d is the tr
 * device, i.e. tr0, tr1 etc.
 * Intended to be used to create a ring-error reporting network module
 * i.e. it will give you the source address of beaconers on the ring
 */
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);
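/* The parameter arrays take one comma-separated value per adapter. As a
 * hypothetical example, forcing the first adapter to 16 Mbps with 4096-byte
 * buffers and ring monitoring while the second autosenses with defaults:
 *
 *	modprobe olympic ringspeed=16,0 pkt_buf_sz=4096 network_monitor=1
 */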
static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR_WAKE, PCI_ANY_ID, PCI_ANY_ID,},
	{ }	/* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, olympic_pci_tbl);
static int olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static int olympic_init(struct net_device *dev);
static int olympic_open(struct net_device *dev);
static netdev_tx_t olympic_xmit(struct sk_buff *skb,
				struct net_device *dev);
static int olympic_close(struct net_device *dev);
static void olympic_set_rx_mode(struct net_device *dev);
static void olympic_freemem(struct net_device *dev);
static irqreturn_t olympic_interrupt(int irq, void *dev_id);
static int olympic_set_mac_address(struct net_device *dev, void *addr);
static void olympic_arb_cmd(struct net_device *dev);
static int olympic_change_mtu(struct net_device *dev, int mtu);
static void olympic_srb_bh(struct net_device *dev);
static void olympic_asb_bh(struct net_device *dev);
static const struct file_operations olympic_proc_ops;
static const struct net_device_ops olympic_netdev_ops = {
	.ndo_open		= olympic_open,
	.ndo_stop		= olympic_close,
	.ndo_start_xmit		= olympic_xmit,
	.ndo_change_mtu		= olympic_change_mtu,
	.ndo_set_rx_mode	= olympic_set_rx_mode,
	.ndo_set_mac_address	= olympic_set_mac_address,
};
static int __devinit olympic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct olympic_private *olympic_priv;
	static int card_no = -1;
	int i;

	card_no++;

	if ((i = pci_enable_device(pdev))) {
		return i;
	}

	pci_set_master(pdev);

	if ((i = pci_request_regions(pdev, "olympic"))) {
		goto op_disable_dev;
	}

	dev = alloc_trdev(sizeof(struct olympic_private));
	if (!dev) {
		i = -ENOMEM;
		goto op_release_dev;
	}

	olympic_priv = netdev_priv(dev);

	spin_lock_init(&olympic_priv->olympic_lock);

	init_waitqueue_head(&olympic_priv->srb_wait);
	init_waitqueue_head(&olympic_priv->trb_wait);
#if OLYMPIC_DEBUG
	printk(KERN_INFO "pci_device: %p, dev:%p, dev->priv: %p\n", pdev, dev, netdev_priv(dev));
#endif
	dev->irq = pdev->irq;
	dev->base_addr = pci_resource_start(pdev, 0);
	olympic_priv->olympic_card_name = pci_name(pdev);
	olympic_priv->pdev = pdev;
	olympic_priv->olympic_mmio = ioremap(pci_resource_start(pdev, 1), 256);
	olympic_priv->olympic_lap = ioremap(pci_resource_start(pdev, 2), 2048);
	if (!olympic_priv->olympic_mmio || !olympic_priv->olympic_lap) {
		i = -ENOMEM;
		goto op_free_iomap;
	}

	if ((pkt_buf_sz[card_no] < 100) || (pkt_buf_sz[card_no] > 18000))
		olympic_priv->pkt_buf_sz = PKT_BUF_SZ;
	else
		olympic_priv->pkt_buf_sz = pkt_buf_sz[card_no];

	dev->mtu = olympic_priv->pkt_buf_sz - TR_HLEN;
	olympic_priv->olympic_ring_speed = ringspeed[card_no];
	olympic_priv->olympic_message_level = message_level[card_no];
	olympic_priv->olympic_network_monitor = network_monitor[card_no];

	if ((i = olympic_init(dev))) {
		goto op_free_iomap;
	}

	dev->netdev_ops = &olympic_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pci_set_drvdata(pdev, dev);
	register_netdev(dev);
	printk("Olympic: %s registered as: %s\n", olympic_priv->olympic_card_name, dev->name);
	if (olympic_priv->olympic_network_monitor) { /* Must go after register_netdev as we need the device name */
		char proc_name[20];
		strcpy(proc_name, "olympic_");
		strcat(proc_name, dev->name);
		proc_create_data(proc_name, 0, init_net.proc_net, &olympic_proc_ops, dev);
		printk("Olympic: Network Monitor information: /proc/%s\n", proc_name);
	}
	return 0;

op_free_iomap:
	if (olympic_priv->olympic_mmio)
		iounmap(olympic_priv->olympic_mmio);
	if (olympic_priv->olympic_lap)
		iounmap(olympic_priv->olympic_lap);

	free_netdev(dev);
op_release_dev:
	pci_release_regions(pdev);

op_disable_dev:
	pci_disable_device(pdev);
	return i;
}
static int olympic_init(struct net_device *dev)
{
	struct olympic_private *olympic_priv;
	u8 __iomem *olympic_mmio, *init_srb, *adapter_addr;
	unsigned long t;
	unsigned int uaa_addr;

	olympic_priv = netdev_priv(dev);
	olympic_mmio = olympic_priv->olympic_mmio;

	printk("%s\n", version);
	printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr, olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);

	writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET, olympic_mmio+BCTL);
	t = jiffies;
	while ((readl(olympic_mmio+BCTL)) & BCTL_SOFTRESET) {
		schedule();
		if (time_after(jiffies, t + 40*HZ)) {
			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
			return -ENODEV;
		}
	}

	/* Needed for cardbus */
	if (!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
		writel(readl(olympic_priv->olympic_mmio+FERMASK) | FERMASK_INT_BIT, olympic_mmio+FERMASK);
	}

#if OLYMPIC_DEBUG
	printk("BCTL: %x\n", readl(olympic_mmio+BCTL));
	printk("GPR: %x\n", readw(olympic_mmio+GPR));
	printk("SISRMASK: %x\n", readl(olympic_mmio+SISR_MASK));
#endif
	/* Aaaahhh, You have got to be real careful setting GPR, the card
	   holds the previous values from flash memory, including autosense
	   and ring speed */

	writel(readl(olympic_mmio+BCTL) | BCTL_MIMREB, olympic_mmio+BCTL);

	if (olympic_priv->olympic_ring_speed == 0) { /* Autosense */
		writew(readw(olympic_mmio+GPR) | GPR_AUTOSENSE, olympic_mmio+GPR);
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Ringspeed autosense mode on\n", olympic_priv->olympic_card_name);
	} else if (olympic_priv->olympic_ring_speed == 16) {
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Trying to open at 16 Mbps as requested\n", olympic_priv->olympic_card_name);
		writew(GPR_16MBPS, olympic_mmio+GPR);
	} else if (olympic_priv->olympic_ring_speed == 4) {
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Trying to open at 4 Mbps as requested\n", olympic_priv->olympic_card_name);
		writew(0, olympic_mmio+GPR);
	}

	writew(readw(olympic_mmio+GPR) | GPR_NEPTUNE_BF, olympic_mmio+GPR);

#if OLYMPIC_DEBUG
	printk("GPR = %x\n", readw(olympic_mmio + GPR));
#endif
	/* Solo has been paused to meet the Cardbus power
	 * specs if the adapter is cardbus. Check to
	 * see if it's been paused and then restart solo. The
	 * adapter should set the pause bit within 1 second.
	 */

	if (!(readl(olympic_mmio+BCTL) & BCTL_MODE_INDICATOR)) {
		t = jiffies;
		while (!(readl(olympic_mmio+CLKCTL) & CLKCTL_PAUSE)) {
			schedule();
			if (time_after(jiffies, t + 2*HZ)) {
				printk(KERN_ERR "IBM Cardbus tokenring adapter not responding.\n");
				return -ENODEV;
			}
		}
		writel(readl(olympic_mmio+CLKCTL) & ~CLKCTL_PAUSE, olympic_mmio+CLKCTL);
	}

	/* start solo init */
	writel((1<<15), olympic_mmio+SISR_MASK_SUM);

	t = jiffies;
	while (!((readl(olympic_mmio+SISR_RR)) & SISR_SRB_REPLY)) {
		schedule();
		if (time_after(jiffies, t + 15*HZ)) {
			printk(KERN_ERR "IBM PCI tokenring card not responding.\n");
			return -ENODEV;
		}
	}

	writel(readw(olympic_mmio+LAPWWO), olympic_mmio+LAPA);

#if OLYMPIC_DEBUG
	printk("LAPWWO: %x, LAPA: %x\n", readl(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
#endif

	init_srb = olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
	{
		int i;
		printk("init_srb(%p): ", init_srb);
		for (i = 0; i < 20; i++)
			printk("%x ", readb(init_srb+i));
		printk("\n");
	}
#endif
	if (readw(init_srb+6)) {
		printk(KERN_INFO "tokenring card initialization failed. errorcode : %x\n", readw(init_srb+6));
		return -ENODEV;
	}

	if (olympic_priv->olympic_message_level) {
		if (readb(init_srb+2) & 0x40) {
			printk(KERN_INFO "Olympic: Adapter is FDX capable.\n");
		} else {
			printk(KERN_INFO "Olympic: Adapter cannot do FDX.\n");
		}
	}

	uaa_addr = swab16(readw(init_srb+8));

#if OLYMPIC_DEBUG
	printk("UAA resides at %x\n", uaa_addr);
#endif

	writel(uaa_addr, olympic_mmio+LAPA);
	adapter_addr = olympic_priv->olympic_lap + (uaa_addr & (~0xf800));

	memcpy_fromio(&dev->dev_addr[0], adapter_addr, 6);

#if OLYMPIC_DEBUG
	printk("adapter address: %pM\n", dev->dev_addr);
#endif

	olympic_priv->olympic_addr_table_addr = swab16(readw(init_srb + 12));
	olympic_priv->olympic_parms_addr = swab16(readw(init_srb + 14));

	return 0;
}
static int olympic_open(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio, *init_srb;
	unsigned long flags, t;
	int i, open_finished = 1;
	u8 resp, err;

	DECLARE_WAITQUEUE(wait, current);

	olympic_init(dev);

	if (request_irq(dev->irq, olympic_interrupt, IRQF_SHARED, "olympic",
			dev))
		return -EAGAIN;

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n", readl(olympic_mmio+BMCTL_SUM));
	printk("pending ints: %x\n", readl(olympic_mmio+SISR_RR));
#endif

	writel(SISR_MI, olympic_mmio+SISR_MASK_SUM);

	writel(SISR_MI | SISR_SRB_REPLY, olympic_mmio+SISR_MASK); /* more ints later, doesn't stop arb cmd interrupt */

	writel(LISR_LIE, olympic_mmio+LISR); /* more ints later */

	/* adapter is closed, so SRB is pointed to by LAPWWO */

	writel(readw(olympic_mmio+LAPWWO), olympic_mmio+LAPA);
	init_srb = olympic_priv->olympic_lap + ((readw(olympic_mmio+LAPWWO)) & (~0xf800));

#if OLYMPIC_DEBUG
	printk("LAPWWO: %x, LAPA: %x\n", readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
	printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
	printk("Before the open command\n");
#endif
	do {
		memset_io(init_srb, 0, SRB_COMMAND_SIZE);

		writeb(SRB_OPEN_ADAPTER, init_srb);	/* open */
		writeb(OLYMPIC_CLEAR_RET_CODE, init_srb+2);

		/* If Network Monitor, instruct card to copy MAC frames through the ARB */
		if (olympic_priv->olympic_network_monitor)
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX | OPEN_ADAPTER_PASS_ADC_MAC | OPEN_ADAPTER_PASS_ATT_MAC | OPEN_ADAPTER_PASS_BEACON), init_srb+8);
		else
			writew(swab16(OPEN_ADAPTER_ENABLE_FDX), init_srb+8);

		/* Test the OR of the first 3 bytes since it's entirely possible for
		 * someone to set the first 2 bytes to zero; although this
		 * is an error, the first byte must have bit 6 set to 1 */

		if (olympic_priv->olympic_laa[0] | olympic_priv->olympic_laa[1] | olympic_priv->olympic_laa[2]) {
			writeb(olympic_priv->olympic_laa[0], init_srb+12);
			writeb(olympic_priv->olympic_laa[1], init_srb+13);
			writeb(olympic_priv->olympic_laa[2], init_srb+14);
			writeb(olympic_priv->olympic_laa[3], init_srb+15);
			writeb(olympic_priv->olympic_laa[4], init_srb+16);
			writeb(olympic_priv->olympic_laa[5], init_srb+17);
			memcpy(dev->dev_addr, olympic_priv->olympic_laa, dev->addr_len);
		}
		writeb(1, init_srb+30);

		spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
		olympic_priv->srb_queued = 1;

		writel(LISR_SRB_CMD, olympic_mmio+LISR_SUM);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock, flags);

		t = jiffies;

		add_wait_queue(&olympic_priv->srb_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);

		while (olympic_priv->srb_queued) {
			schedule();
			if (signal_pending(current)) {
				printk(KERN_WARNING "%s: Signal received in open.\n",
					dev->name);
				printk(KERN_WARNING "SISR=%x LISR=%x\n",
					readl(olympic_mmio+SISR),
					readl(olympic_mmio+LISR));
				olympic_priv->srb_queued = 0;
				break;
			}
			if (time_after(jiffies, t + 10*HZ)) {
				printk(KERN_WARNING "%s: SRB timed out.\n", dev->name);
				olympic_priv->srb_queued = 0;
				break;
			}
			set_current_state(TASK_INTERRUPTIBLE);
		}
		remove_wait_queue(&olympic_priv->srb_wait, &wait);
		set_current_state(TASK_RUNNING);
		olympic_priv->srb_queued = 0;
#if OLYMPIC_DEBUG
		printk("init_srb(%p): ", init_srb);
		for (i = 0; i < 20; i++)
			printk("%02x ", readb(init_srb+i));
		printk("\n");
#endif

		/* If we get the same return response as we set, the interrupt wasn't raised and the open
		 * timed out.
		 */
		switch (resp = readb(init_srb+2)) {
		case OLYMPIC_CLEAR_RET_CODE:
			printk(KERN_WARNING "%s: Adapter Open time out or error.\n", dev->name);
			goto out;
		case 0:
			open_finished = 1;
			break;
		case 0x07:
			if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense, first time around */
				printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
				open_finished = 0;
				continue;
			}

			err = readb(init_srb+7);

			if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
				printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
				printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
			} else {
				printk(KERN_WARNING "%s: %s - %s\n", dev->name,
					open_maj_error[(err & 0xf0) >> 4],
					open_min_error[(err & 0x0f)]);
			}
			goto out;

		case 0x32:
			printk(KERN_WARNING "%s: Invalid LAA: %pM\n",
			       dev->name, olympic_priv->olympic_laa);
			goto out;

		default:
			printk(KERN_WARNING "%s: Bad OPEN response: %x\n", dev->name, resp);
			goto out;

		}
	} while (!(open_finished)); /* Will only loop if ring speed mismatch re-open attempted && autosense is on */
	if (readb(init_srb+18) & (1<<3))
		if (olympic_priv->olympic_message_level)
			printk(KERN_INFO "%s: Opened in FDX Mode\n", dev->name);

	if (readb(init_srb+18) & (1<<1))
		olympic_priv->olympic_ring_speed = 100;
	else if (readb(init_srb+18) & 1)
		olympic_priv->olympic_ring_speed = 16;
	else
		olympic_priv->olympic_ring_speed = 4;

	if (olympic_priv->olympic_message_level)
		printk(KERN_INFO "%s: Opened in %d Mbps mode\n", dev->name, olympic_priv->olympic_ring_speed);

	olympic_priv->asb = swab16(readw(init_srb+8));
	olympic_priv->srb = swab16(readw(init_srb+10));
	olympic_priv->arb = swab16(readw(init_srb+12));
	olympic_priv->trb = swab16(readw(init_srb+16));

	olympic_priv->olympic_receive_options = 0x01;
	olympic_priv->olympic_copy_all_options = 0;

	/* setup rx ring */

	writel((3<<16), olympic_mmio+BMCTL_RWM); /* Ensure end of frame generated interrupts */

	writel(BMCTL_RX_DIS|3, olympic_mmio+BMCTL_RWM); /* Yes, this enables the RX channel */

	for (i = 0; i < OLYMPIC_RX_RING_SIZE; i++) {

		struct sk_buff *skb;

		skb = dev_alloc_skb(olympic_priv->pkt_buf_sz);
		if (skb == NULL)
			break;

		skb->dev = dev;

		olympic_priv->olympic_rx_ring[i].buffer = cpu_to_le32(pci_map_single(olympic_priv->pdev,
			skb->data, olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
		olympic_priv->olympic_rx_ring[i].res_length = cpu_to_le32(olympic_priv->pkt_buf_sz);
		olympic_priv->rx_ring_skb[i] = skb;
	}

	if (i == 0) {
		printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n", dev->name);
		goto out;
	}

	olympic_priv->rx_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_ring,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXDESCQ);
	writel(olympic_priv->rx_ring_dma_addr, olympic_mmio+RXCDA);
	writew(i, olympic_mmio+RXDESCQCNT);

	olympic_priv->rx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_rx_status_ring,
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXSTATQ);
	writel(olympic_priv->rx_status_ring_dma_addr, olympic_mmio+RXCSA);

	olympic_priv->rx_ring_last_received = OLYMPIC_RX_RING_SIZE - 1;	/* last processed rx status */
	olympic_priv->rx_status_last_received = OLYMPIC_RX_RING_SIZE - 1;

	writew(i, olympic_mmio+RXSTATQCNT);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n", i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_status_ring[0]: %p\n", readl(olympic_mmio+RXCSA), &olympic_priv->olympic_rx_status_ring[0]);
	printk(" stat_ring[1]: %p, stat_ring[2]: %p, stat_ring[3]: %p\n", &(olympic_priv->olympic_rx_status_ring[1]), &(olympic_priv->olympic_rx_status_ring[2]), &(olympic_priv->olympic_rx_status_ring[3]));
	printk(" stat_ring[4]: %p, stat_ring[5]: %p, stat_ring[6]: %p\n", &(olympic_priv->olympic_rx_status_ring[4]), &(olympic_priv->olympic_rx_status_ring[5]), &(olympic_priv->olympic_rx_status_ring[6]));
	printk(" stat_ring[7]: %p\n", &(olympic_priv->olympic_rx_status_ring[7]));

	printk("RXCDA: %x, rx_ring[0]: %p\n", readl(olympic_mmio+RXCDA), &olympic_priv->olympic_rx_ring[0]);
	printk("Rx_ring_dma_addr = %08x, rx_status_dma_addr = %08x\n",
		olympic_priv->rx_ring_dma_addr, olympic_priv->rx_status_ring_dma_addr);
#endif

	writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | i, olympic_mmio+RXENQ);

#if OLYMPIC_DEBUG
	printk("# of rx buffers: %d, RXENQ: %x\n", i, readw(olympic_mmio+RXENQ));
	printk("RXCSA: %x, rx_ring[0]: %p\n", readl(olympic_mmio+RXCSA), &olympic_priv->olympic_rx_status_ring[0]);
	printk("RXCDA: %x, rx_ring[0]: %p\n", readl(olympic_mmio+RXCDA), &olympic_priv->olympic_rx_ring[0]);
#endif

	writel(SISR_RX_STATUS | SISR_RX_NOBUF, olympic_mmio+SISR_MASK_SUM);

	/* setup tx ring */

	writel(BMCTL_TX1_DIS, olympic_mmio+BMCTL_RWM); /* Yes, this enables TX channel 1 */
	for (i = 0; i < OLYMPIC_TX_RING_SIZE; i++)
		olympic_priv->olympic_tx_ring[i].buffer = cpu_to_le32(0xdeadbeef);

	olympic_priv->free_tx_ring_entries = OLYMPIC_TX_RING_SIZE;
	olympic_priv->tx_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_ring,
		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXDESCQ_1);
	writel(olympic_priv->tx_ring_dma_addr, olympic_mmio+TXCDA_1);
	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXDESCQCNT_1);

	olympic_priv->tx_status_ring_dma_addr = pci_map_single(olympic_priv->pdev, olympic_priv->olympic_tx_status_ring,
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	writel(olympic_priv->tx_status_ring_dma_addr, olympic_mmio+TXSTATQ_1);
	writel(olympic_priv->tx_status_ring_dma_addr, olympic_mmio+TXCSA_1);
	writew(OLYMPIC_TX_RING_SIZE, olympic_mmio+TXSTATQCNT_1);

	olympic_priv->tx_ring_free = 0;	/* next entry in tx ring to use */
	olympic_priv->tx_ring_last_status = OLYMPIC_TX_RING_SIZE - 1; /* last processed tx status */

	writel(0xffffffff, olympic_mmio+EISR_RWM);	/* clean the eisr */
	writel(0, olympic_mmio+EISR);
	writel(EISR_MASK_OPTIONS, olympic_mmio+EISR_MASK);	/* enables most of the TX error interrupts */
	writel(SISR_TX1_EOF | SISR_ADAPTER_CHECK | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_ASB_FREE | SISR_ERR, olympic_mmio+SISR_MASK_SUM);

#if OLYMPIC_DEBUG
	printk("BMCTL: %x\n", readl(olympic_mmio+BMCTL_SUM));
	printk("SISR MASK: %x\n", readl(olympic_mmio+SISR_MASK));
#endif

	if (olympic_priv->olympic_network_monitor) {
		u8 __iomem *oat;
		u8 __iomem *opt;
		u8 addr[6];
		oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
		opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);

		for (i = 0; i < 6; i++)
			addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table, node_addr)+i);
		printk("%s: Node Address: %pM\n", dev->name, addr);
		printk("%s: Functional Address: %02x:%02x:%02x:%02x\n", dev->name,
			readb(oat+offsetof(struct olympic_adapter_addr_table, func_addr)),
			readb(oat+offsetof(struct olympic_adapter_addr_table, func_addr)+1),
			readb(oat+offsetof(struct olympic_adapter_addr_table, func_addr)+2),
			readb(oat+offsetof(struct olympic_adapter_addr_table, func_addr)+3));

		for (i = 0; i < 6; i++)
			addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr)+i);
		printk("%s: NAUN Address: %pM\n", dev->name, addr);
	}

	netif_start_queue(dev);
	return 0;

out:
	free_irq(dev->irq, dev);
	return -EIO;
}
/*
 *	When we enter the rx routine we do not know how many frames have been
 *	queued on the rx channel.  Therefore we start at the next rx status
 *	position and travel around the receive ring until we have completed
 *	all the frames.
 *
 *	This means that we may process the frame before we receive the end
 *	of frame interrupt. This is why we always test the status instead
 *	of blindly processing the next frame.
 *
 *	We also remove the last 4 bytes from the packet as well, these are
 *	just token ring trailer info and upset protocols that don't check
 *	their own length, i.e. SNA.
 */
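/* Note on the index arithmetic used throughout the ring handling below:
 * wrapping is done with (n + 1) & (OLYMPIC_RX_RING_SIZE - 1), which is only
 * correct because the ring sizes are powers of two.
 */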
static void olympic_rx(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio;
	struct olympic_rx_status *rx_status;
	struct olympic_rx_desc *rx_desc;
	int rx_ring_last_received, length, buffer_cnt, cpy_length, frag_len;
	struct sk_buff *skb, *skb2;
	int i;

	rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]);

	while (rx_status->status_buffercnt) {
		u32 l_status_buffercnt;

		olympic_priv->rx_status_last_received++;
		olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE - 1);
#if OLYMPIC_DEBUG
		printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
#endif
		length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
		buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
		i = buffer_cnt;	/* Need buffer_cnt later for rxenq update */
		frag_len = le32_to_cpu(rx_status->fragmentcnt_framelen) >> 16;

#if OLYMPIC_DEBUG
		printk("length: %x, frag_len: %x, buffer_cnt: %x\n", length, frag_len, buffer_cnt);
#endif
		l_status_buffercnt = le32_to_cpu(rx_status->status_buffercnt);
		if (l_status_buffercnt & 0xC0000000) {
			if (l_status_buffercnt & 0x3B000000) {
				if (olympic_priv->olympic_message_level) {
					if (l_status_buffercnt & (1<<29))  /* Rx Frame Truncated */
						printk(KERN_WARNING "%s: Rx Frame Truncated\n", dev->name);
					if (l_status_buffercnt & (1<<28)) /* Rx receive overrun */
						printk(KERN_WARNING "%s: Rx Frame Receive overrun\n", dev->name);
					if (l_status_buffercnt & (1<<27)) /* No receive buffers */
						printk(KERN_WARNING "%s: No receive buffers\n", dev->name);
					if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
						printk(KERN_WARNING "%s: Receive frame error detect\n", dev->name);
					if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
						printk(KERN_WARNING "%s: Received Error Detect\n", dev->name);
				}
				olympic_priv->rx_ring_last_received += i;
				olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1);
				dev->stats.rx_errors++;
			} else {

				if (buffer_cnt == 1) {
					skb = dev_alloc_skb(max_t(int, olympic_priv->pkt_buf_sz, length));
				} else {
					skb = dev_alloc_skb(length);
				}

				if (skb == NULL) {
					printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
					dev->stats.rx_dropped++;
					/* Update counters even though we don't transfer the frame */
					olympic_priv->rx_ring_last_received += i;
					olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1);
				} else {
					/* Optimise based upon number of buffers used.
					   If only one buffer is used we can simply swap the buffers around.
					   If more than one then we must use the new buffer and copy the information
					   first. Ideally all frames would be in a single buffer, this can be tuned by
					   altering the buffer size. If the length of the packet is less than
					   1500 bytes we're going to copy it over anyway to stop packets getting
					   dropped from sockets with buffers smaller than our pkt_buf_sz. */

					if (buffer_cnt == 1) {
						olympic_priv->rx_ring_last_received++;
						olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1);
						rx_ring_last_received = olympic_priv->rx_ring_last_received;
						if (length > 1500) {
							skb2 = olympic_priv->rx_ring_skb[rx_ring_last_received];
							/* unmap buffer */
							pci_unmap_single(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
							skb_put(skb2, length - 4);
							skb2->protocol = tr_type_trans(skb2, dev);
							olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer =
								cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data,
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE));
							olympic_priv->olympic_rx_ring[rx_ring_last_received].res_length =
								cpu_to_le32(olympic_priv->pkt_buf_sz);
							olympic_priv->rx_ring_skb[rx_ring_last_received] = skb;
							netif_rx(skb2);
						} else {
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
							skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
								skb_put(skb, length - 4),
								length - 4);
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
							skb->protocol = tr_type_trans(skb, dev);
							netif_rx(skb);
						}
					} else {
						do { /* Walk the buffers */
							olympic_priv->rx_ring_last_received++;
							olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1);
							rx_ring_last_received = olympic_priv->rx_ring_last_received;
							pci_dma_sync_single_for_cpu(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
							rx_desc = &(olympic_priv->olympic_rx_ring[rx_ring_last_received]);
							cpy_length = (i == 1 ? frag_len : le32_to_cpu(rx_desc->res_length));
							skb_copy_from_linear_data(olympic_priv->rx_ring_skb[rx_ring_last_received],
								skb_put(skb, cpy_length),
								cpy_length);
							pci_dma_sync_single_for_device(olympic_priv->pdev,
								le32_to_cpu(olympic_priv->olympic_rx_ring[rx_ring_last_received].buffer),
								olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
						} while (--i);
						skb_trim(skb, skb->len - 4);
						skb->protocol = tr_type_trans(skb, dev);
						netif_rx(skb);
					}
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += length;
				} /* if skb == null */
			} /* If status & 0x3b */
		} else { /* if buffercnt & 0xC */
			olympic_priv->rx_ring_last_received += i;
			olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE - 1);
		}

		rx_status->fragmentcnt_framelen = 0;
		rx_status->status_buffercnt = 0;
		rx_status = &(olympic_priv->olympic_rx_status_ring[(olympic_priv->rx_status_last_received + 1) & (OLYMPIC_RX_RING_SIZE - 1)]);

		writew((((readw(olympic_mmio+RXENQ)) & 0x8000) ^ 0x8000) | buffer_cnt, olympic_mmio+RXENQ);
	} /* while */

}
static void olympic_freemem(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	int i;

	for (i = 0; i < OLYMPIC_RX_RING_SIZE; i++) {
		if (olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] != NULL) {
			dev_kfree_skb_irq(olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received]);
			olympic_priv->rx_ring_skb[olympic_priv->rx_status_last_received] = NULL;
		}
		if (olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer != cpu_to_le32(0xdeadbeef)) {
			pci_unmap_single(olympic_priv->pdev,
				le32_to_cpu(olympic_priv->olympic_rx_ring[olympic_priv->rx_status_last_received].buffer),
				olympic_priv->pkt_buf_sz, PCI_DMA_FROMDEVICE);
		}
		olympic_priv->rx_status_last_received++;
		olympic_priv->rx_status_last_received &= OLYMPIC_RX_RING_SIZE - 1;
	}
	/* unmap rings */
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_status_ring_dma_addr,
		sizeof(struct olympic_rx_status) * OLYMPIC_RX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->rx_ring_dma_addr,
		sizeof(struct olympic_rx_desc) * OLYMPIC_RX_RING_SIZE, PCI_DMA_TODEVICE);

	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_status_ring_dma_addr,
		sizeof(struct olympic_tx_status) * OLYMPIC_TX_RING_SIZE, PCI_DMA_FROMDEVICE);
	pci_unmap_single(olympic_priv->pdev, olympic_priv->tx_ring_dma_addr,
		sizeof(struct olympic_tx_desc) * OLYMPIC_TX_RING_SIZE, PCI_DMA_TODEVICE);

	return;
}
static irqreturn_t olympic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio;
	u32 sisr;
	u8 __iomem *adapter_check_area;

	/*
	 *  Read sisr but don't reset it yet.
	 *  The indication bit may have been set but the interrupt latch
	 *  bit may not be set, so we'd lose the interrupt later.
	 */
	sisr = readl(olympic_mmio+SISR);
	if (!(sisr & SISR_MI)) /* Interrupt isn't for us */
		return IRQ_NONE;
	sisr = readl(olympic_mmio+SISR_RR);  /* Read & Reset sisr */

	spin_lock(&olympic_priv->olympic_lock);

	/* Hotswap gives us this on removal */
	if (sisr == 0xffffffff) {
		printk(KERN_WARNING "%s: Hotswap adapter removal.\n", dev->name);
		spin_unlock(&olympic_priv->olympic_lock);
		return IRQ_NONE;
	}

	if (sisr & (SISR_SRB_REPLY | SISR_TX1_EOF | SISR_RX_STATUS | SISR_ADAPTER_CHECK |
			SISR_ASB_FREE | SISR_ARB_CMD | SISR_TRB_REPLY | SISR_RX_NOBUF | SISR_ERR)) {

		/* If we ever get this the adapter is seriously dead. Only a reset is going to
		 * bring it back to life. We're talking pci bus errors and such like :( */
		if ((sisr & SISR_ERR) && (readl(olympic_mmio+EISR) & EISR_MASK_OPTIONS)) {
			printk(KERN_ERR "Olympic: EISR Error, EISR=%08x\n", readl(olympic_mmio+EISR));
			printk(KERN_ERR "The adapter must be reset to clear this condition.\n");
			printk(KERN_ERR "Please report this error to the driver maintainer and/\n");
			printk(KERN_ERR "or the linux-tr mailing list.\n");
			wake_up_interruptible(&olympic_priv->srb_wait);
			spin_unlock(&olympic_priv->olympic_lock);
			return IRQ_HANDLED;
		} /* SISR_ERR */

		if (sisr & SISR_SRB_REPLY) {
			if (olympic_priv->srb_queued == 1) {
				wake_up_interruptible(&olympic_priv->srb_wait);
			} else if (olympic_priv->srb_queued == 2) {
				olympic_srb_bh(dev);
			}
			olympic_priv->srb_queued = 0;
		} /* SISR_SRB_REPLY */

		/* We shouldn't ever miss the Tx interrupt, but you never know, hence the loop to ensure
		   we get all tx completions. */
		if (sisr & SISR_TX1_EOF) {
			while (olympic_priv->olympic_tx_status_ring[(olympic_priv->tx_ring_last_status + 1) & (OLYMPIC_TX_RING_SIZE-1)].status) {
				olympic_priv->tx_ring_last_status++;
				olympic_priv->tx_ring_last_status &= (OLYMPIC_TX_RING_SIZE-1);
				olympic_priv->free_tx_ring_entries++;
				dev->stats.tx_bytes += olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len;
				dev->stats.tx_packets++;
				pci_unmap_single(olympic_priv->pdev,
					le32_to_cpu(olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer),
					olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]->len, PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(olympic_priv->tx_ring_skb[olympic_priv->tx_ring_last_status]);
				olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_last_status].buffer = cpu_to_le32(0xdeadbeef);
				olympic_priv->olympic_tx_status_ring[olympic_priv->tx_ring_last_status].status = 0;
			}
			netif_wake_queue(dev);
		} /* SISR_TX1_EOF */

		if (sisr & SISR_RX_STATUS) {
			olympic_rx(dev);
		} /* SISR_RX_STATUS */

		if (sisr & SISR_ADAPTER_CHECK) {
			netif_stop_queue(dev);
			printk(KERN_WARNING "%s: Adapter Check Interrupt Raised, 8 bytes of information follow:\n", dev->name);
			writel(readl(olympic_mmio+LAPWWC), olympic_mmio+LAPA);
			adapter_check_area = olympic_priv->olympic_lap + ((readl(olympic_mmio+LAPWWC)) & (~0xf800));
			printk(KERN_WARNING "%s: Bytes %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name, readb(adapter_check_area+0), readb(adapter_check_area+1), readb(adapter_check_area+2), readb(adapter_check_area+3), readb(adapter_check_area+4), readb(adapter_check_area+5), readb(adapter_check_area+6), readb(adapter_check_area+7));
			spin_unlock(&olympic_priv->olympic_lock);
			return IRQ_HANDLED;
		} /* SISR_ADAPTER_CHECK */

		if (sisr & SISR_ASB_FREE) {
			/* Wake up anything that is waiting for the asb response */
			if (olympic_priv->asb_queued) {
				olympic_asb_bh(dev);
			}
		} /* SISR_ASB_FREE */

		if (sisr & SISR_ARB_CMD) {
			olympic_arb_cmd(dev);
		} /* SISR_ARB_CMD */

		if (sisr & SISR_TRB_REPLY) {
			/* Wake up anything that is waiting for the trb response */
			if (olympic_priv->trb_queued) {
				wake_up_interruptible(&olympic_priv->trb_wait);
			}
			olympic_priv->trb_queued = 0;
		} /* SISR_TRB_REPLY */

		if (sisr & SISR_RX_NOBUF) {
			/* According to the documentation, we don't have to do anything,
			   but trapping it keeps it out of /var/log/messages. */
		} /* SISR_RX_NOBUF */
	} else {
		printk(KERN_WARNING "%s: Unexpected interrupt: %x\n", dev->name, sisr);
		printk(KERN_WARNING "%s: SISR_MASK: %x\n", dev->name, readl(olympic_mmio+SISR_MASK));
	} /* One of the interrupts we want */

	writel(SISR_MI, olympic_mmio+SISR_MASK_SUM);

	spin_unlock(&olympic_priv->olympic_lock);
	return IRQ_HANDLED;
}
static netdev_tx_t olympic_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio;
	unsigned long flags;

	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);

	netif_stop_queue(dev);

	if (olympic_priv->free_tx_ring_entries) {
		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].buffer =
			cpu_to_le32(pci_map_single(olympic_priv->pdev, skb->data, skb->len, PCI_DMA_TODEVICE));
		olympic_priv->olympic_tx_ring[olympic_priv->tx_ring_free].status_length = cpu_to_le32(skb->len | (0x80000000));
		olympic_priv->tx_ring_skb[olympic_priv->tx_ring_free] = skb;
		olympic_priv->free_tx_ring_entries--;

		olympic_priv->tx_ring_free++;
		olympic_priv->tx_ring_free &= (OLYMPIC_TX_RING_SIZE-1);
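		/* The high bit of TXENQ appears to act as a toggle: read the
		 * current value, flip it, and write it back together with the
		 * number of newly enqueued descriptors (one here). RXENQ is
		 * driven with the same pattern elsewhere in this driver.
		 */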
		writew((((readw(olympic_mmio+TXENQ_1)) & 0x8000) ^ 0x8000) | 1, olympic_mmio+TXENQ_1);
		netif_wake_queue(dev);
		spin_unlock_irqrestore(&olympic_priv->olympic_lock, flags);
		return NETDEV_TX_OK;
	} else {
		spin_unlock_irqrestore(&olympic_priv->olympic_lock, flags);
		return NETDEV_TX_BUSY;
	}
}
static int olympic_close(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio, *srb;
	unsigned long t, flags;

	DECLARE_WAITQUEUE(wait, current);

	netif_stop_queue(dev);

	writel(olympic_priv->srb, olympic_mmio+LAPA);
	srb = olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	writeb(SRB_CLOSE_ADAPTER, srb+0);
	writeb(0, srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE, srb+2);

	add_wait_queue(&olympic_priv->srb_wait, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&olympic_priv->olympic_lock, flags);
	olympic_priv->srb_queued = 1;

	writel(LISR_SRB_CMD, olympic_mmio+LISR_SUM);
	spin_unlock_irqrestore(&olympic_priv->olympic_lock, flags);

	while (olympic_priv->srb_queued) {

		t = schedule_timeout_interruptible(60*HZ);

		if (signal_pending(current)) {
			printk(KERN_WARNING "%s: Signal received in close.\n", dev->name);
			printk(KERN_WARNING "SISR=%x LISR=%x\n", readl(olympic_mmio+SISR), readl(olympic_mmio+LISR));
			olympic_priv->srb_queued = 0;
			break;
		}

		if (t == 0) {
			printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n", dev->name);
		}
		olympic_priv->srb_queued = 0;
	}
	remove_wait_queue(&olympic_priv->srb_wait, &wait);

	olympic_priv->rx_status_last_received++;
	olympic_priv->rx_status_last_received &= OLYMPIC_RX_RING_SIZE - 1;

	olympic_freemem(dev);

	/* reset tx/rx fifo's and busmaster logic */

	writel(readl(olympic_mmio+BCTL) | (3<<13), olympic_mmio+BCTL);
	udelay(1);
	writel(readl(olympic_mmio+BCTL) & ~(3<<13), olympic_mmio+BCTL);

#if OLYMPIC_DEBUG
	{
		int i;
		printk("srb(%p): ", srb);
		for (i = 0; i < 4; i++)
			printk("%x ", readb(srb+i));
		printk("\n");
	}
#endif
	free_irq(dev->irq, dev);

	return 0;
}
static void olympic_set_rx_mode(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio;
	u8 options = 0;
	u8 __iomem *srb;
	struct netdev_hw_addr *ha;
	unsigned char dev_mc_address[4];

	writel(olympic_priv->srb, olympic_mmio+LAPA);
	srb = olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
	options = olympic_priv->olympic_copy_all_options;

	if (dev->flags & IFF_PROMISC)
		options |= 0x61;
	else
		options &= ~0x61;

	/* Only issue the srb if there is a change in options */

	if ((options ^ olympic_priv->olympic_copy_all_options)) {

		/* Now to issue the srb command to alter the copy.all.options */

		writeb(SRB_MODIFY_RECEIVE_OPTIONS, srb);
		writeb(0, srb+1);
		writeb(OLYMPIC_CLEAR_RET_CODE, srb+2);
		writeb(0, srb+3);
		writeb(olympic_priv->olympic_receive_options, srb+4);
		writeb(options, srb+5);

		olympic_priv->srb_queued = 2; /* Can't sleep, use srb_bh */

		writel(LISR_SRB_CMD, olympic_mmio+LISR_SUM);

		olympic_priv->olympic_copy_all_options = options;

		return;
	}

	/* Set the functional addresses we need for multicast */

	dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0;

	netdev_for_each_mc_addr(ha, dev) {
		dev_mc_address[0] |= ha->addr[2];
		dev_mc_address[1] |= ha->addr[3];
		dev_mc_address[2] |= ha->addr[4];
		dev_mc_address[3] |= ha->addr[5];
	}
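	/* Note: the mask just built is the OR of the low four bytes of every
	 * multicast address, so the adapter may pass a superset of the
	 * requested groups; finer-grained filtering is left to the stack.
	 */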
	writeb(SRB_SET_FUNC_ADDRESS, srb+0);
	writeb(0, srb+1);
	writeb(OLYMPIC_CLEAR_RET_CODE, srb+2);
	writeb(0, srb+3);
	writeb(0, srb+4);
	writeb(0, srb+5);
	writeb(dev_mc_address[0], srb+6);
	writeb(dev_mc_address[1], srb+7);
	writeb(dev_mc_address[2], srb+8);
	writeb(dev_mc_address[3], srb+9);

	olympic_priv->srb_queued = 2;
	writel(LISR_SRB_CMD, olympic_mmio+LISR_SUM);
}
static void olympic_srb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio;
	u8 __iomem *srb;

	writel(olympic_priv->srb, olympic_mmio+LAPA);
	srb = olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));

	switch (readb(srb)) {

	/* SRB_MODIFY_RECEIVE_OPTIONS i.e. set_multicast_list options (promiscuous)
	 * At some point we should do something if we get an error, such as
	 * resetting the IFF_PROMISC flag in dev
	 */
	case SRB_MODIFY_RECEIVE_OPTIONS:
		switch (readb(srb+2)) {
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			if (olympic_priv->olympic_message_level)
				printk(KERN_WARNING "%s: Receive Options Modified to %x,%x\n", dev->name, olympic_priv->olympic_copy_all_options, olympic_priv->olympic_receive_options);
			break;
		} /* switch srb[2] */
		break;

	/* SRB_SET_GROUP_ADDRESS - Multicast group setting
	 */
	case SRB_SET_GROUP_ADDRESS:
		switch (readb(srb+2)) {
		case 0x00:
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		case 0x3c:
			printk(KERN_WARNING "%s: Group/Functional address indicator bits not set correctly\n", dev->name);
			break;
		case 0x3e: /* If we ever implement individual multicast addresses, will need to deal with this */
			printk(KERN_WARNING "%s: Group address registers full\n", dev->name);
			break;
		case 0x55:
			printk(KERN_INFO "%s: Group Address already set.\n", dev->name);
			break;
		default:
			break;
		} /* switch srb[2] */
		break;

	/* SRB_RESET_GROUP_ADDRESS - Remove a multicast address from group list
	 */
	case SRB_RESET_GROUP_ADDRESS:
		switch (readb(srb+2)) {
		case 0x00:
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		case 0x39: /* Must deal with this if individual multicast addresses used */
			printk(KERN_INFO "%s: Group address not found\n", dev->name);
			break;
		default:
			break;
		} /* switch srb[2] */
		break;

	/* SRB_SET_FUNC_ADDRESS - Called by the set_rx_mode
	 */
	case SRB_SET_FUNC_ADDRESS:
		switch (readb(srb+2)) {
		case 0x00:
			if (olympic_priv->olympic_message_level)
				printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			break;
		} /* switch srb[2] */
		break;

	/* SRB_READ_LOG - Read and reset the adapter error counters
	 */
	case SRB_READ_LOG:
		switch (readb(srb+2)) {
		case 0x00:
			if (olympic_priv->olympic_message_level)
				printk(KERN_INFO "%s: Read Log issued\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		} /* switch srb[2] */
		break;

	/* SRB_READ_SR_COUNTERS - Read and reset the source routing bridge related counters */

	case SRB_READ_SR_COUNTERS:
		switch (readb(srb+2)) {
		case 0x00:
			if (olympic_priv->olympic_message_level)
				printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
			break;
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
			break;
		case 0x04:
			printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
			break;
		default:
			break;
		} /* switch srb[2] */
		break;

	default:
		printk(KERN_WARNING "%s: Unrecognized srb bh return value.\n", dev->name);
		break;
	} /* switch srb[0] */
}
static int olympic_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *saddr = addr;
	struct olympic_private *olympic_priv = netdev_priv(dev);

	if (netif_running(dev)) {
		printk(KERN_WARNING "%s: Cannot set mac/laa address while card is open\n", dev->name);
		return -EIO;
	}

	memcpy(olympic_priv->olympic_laa, saddr->sa_data, dev->addr_len);

	if (olympic_priv->olympic_message_level) {
		printk(KERN_INFO "%s: MAC/LAA Set to = %x.%x.%x.%x.%x.%x\n", dev->name, olympic_priv->olympic_laa[0],
			olympic_priv->olympic_laa[1], olympic_priv->olympic_laa[2],
			olympic_priv->olympic_laa[3], olympic_priv->olympic_laa[4],
			olympic_priv->olympic_laa[5]);
	}

	return 0;
}
static void olympic_arb_cmd(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio;
	u8 __iomem *arb_block, *asb_block, *srb;
	u8 header_len;
	u16 frame_len, buffer_len;
	struct sk_buff *mac_frame;
	u8 __iomem *buf_ptr;
	u8 __iomem *frame_data;
	u16 buff_off;
	u16 lan_status = 0, lan_status_diff; /* Initialize to stop compiler warning */
	u8 fdx_prot_error;
	u16 next_ptr;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb);
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb);
	srb = (olympic_priv->olympic_lap + olympic_priv->srb);

	if (readb(arb_block+0) == ARB_RECEIVE_DATA) { /* Receive.data, MAC frames */

		header_len = readb(arb_block+8);	/* 802.5 Token-Ring Header Length */
		frame_len = swab16(readw(arb_block + 10));

		buff_off = swab16(readw(arb_block + 6));

		buf_ptr = olympic_priv->olympic_lap + buff_off;

#if OLYMPIC_DEBUG
		{
			int i;
			frame_data = buf_ptr + offsetof(struct mac_receive_buffer, frame_data);

			for (i = 0; i < 14; i++) {
				printk("Loc %d = %02x\n", i, readb(frame_data + i));
			}

			printk("next %04x, fs %02x, len %04x\n", readw(buf_ptr+offsetof(struct mac_receive_buffer, next)), readb(buf_ptr+offsetof(struct mac_receive_buffer, frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer, buffer_length)));
		}
#endif
		mac_frame = dev_alloc_skb(frame_len);
		if (!mac_frame) {
			printk(KERN_WARNING "%s: Memory squeeze, dropping frame.\n", dev->name);
			goto drop_frame;
		}

		/* Walk the buffer chain, creating the frame */

		do {
			frame_data = buf_ptr + offsetof(struct mac_receive_buffer, frame_data);
			buffer_len = swab16(readw(buf_ptr+offsetof(struct mac_receive_buffer, buffer_length)));
			memcpy_fromio(skb_put(mac_frame, buffer_len), frame_data, buffer_len);
			next_ptr = readw(buf_ptr+offsetof(struct mac_receive_buffer, next));
		} while (next_ptr && (buf_ptr = olympic_priv->olympic_lap + swab16(next_ptr)));

		mac_frame->protocol = tr_type_trans(mac_frame, dev);

		if (olympic_priv->olympic_network_monitor) {
			struct trh_hdr *mac_hdr;
			printk(KERN_WARNING "%s: Received MAC Frame, details:\n", dev->name);
			mac_hdr = tr_hdr(mac_frame);
			printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
			       dev->name, mac_hdr->daddr);
			printk(KERN_WARNING "%s: MAC Frame Srce. Addr: %pM\n",
			       dev->name, mac_hdr->saddr);
		}
		netif_rx(mac_frame);

drop_frame:
		/* Now tell the card we have dealt with the received frame */

		/* Set LISR Bit 1 */
		writel(LISR_ARB_FREE, olympic_priv->olympic_mmio + LISR_SUM);

		/* Is the ASB free ? */

		if (readb(asb_block + 2) != 0xff) {
			olympic_priv->asb_queued = 1;
			writel(LISR_ASB_FREE_REQ, olympic_priv->olympic_mmio+LISR_SUM);
			return;
			/* Drop out and wait for the bottom half to be run */
		}

		writeb(ASB_RECEIVE_DATA, asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE, asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6), asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7), asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, olympic_priv->olympic_mmio+LISR_SUM);

		olympic_priv->asb_queued = 2;

		return;

	} else if (readb(arb_block) == ARB_LAN_CHANGE_STATUS) { /* Lan.change.status */
		lan_status = swab16(readw(arb_block+6));
		fdx_prot_error = readb(arb_block+8);

		/* Issue ARB Free */
		writel(LISR_ARB_FREE, olympic_priv->olympic_mmio+LISR_SUM);

		lan_status_diff = olympic_priv->olympic_lan_status ^ lan_status;

		if (lan_status_diff & (LSC_LWF | LSC_ARW | LSC_FPE | LSC_RR)) {
			if (lan_status_diff & LSC_LWF)
				printk(KERN_WARNING "%s: Short circuit detected on the lobe\n", dev->name);
			if (lan_status_diff & LSC_ARW)
				printk(KERN_WARNING "%s: Auto removal error\n", dev->name);
			if (lan_status_diff & LSC_FPE)
				printk(KERN_WARNING "%s: FDX Protocol Error\n", dev->name);
			if (lan_status_diff & LSC_RR)
				printk(KERN_WARNING "%s: Force remove MAC frame received\n", dev->name);

			/* Adapter has been closed by the hardware */

			/* reset tx/rx fifo's and busmaster logic */

			writel(readl(olympic_mmio+BCTL) | (3<<13), olympic_mmio+BCTL);
			udelay(1);
			writel(readl(olympic_mmio+BCTL) & ~(3<<13), olympic_mmio+BCTL);
			netif_stop_queue(dev);
			olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO);
			printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
		} /* If serious error */

		if (olympic_priv->olympic_message_level) {
			if (lan_status_diff & LSC_SIG_LOSS)
				printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
			if (lan_status_diff & LSC_HARD_ERR)
				printk(KERN_INFO "%s: Beaconing\n", dev->name);
			if (lan_status_diff & LSC_SOFT_ERR)
				printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
			if (lan_status_diff & LSC_TRAN_BCN)
				printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
			if (lan_status_diff & LSC_SS)
				printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
			if (lan_status_diff & LSC_RING_REC)
				printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
			if (lan_status_diff & LSC_FDX_MODE)
				printk(KERN_INFO "%s: Operating in FDX mode\n", dev->name);
		}

		if (lan_status_diff & LSC_CO) {

			if (olympic_priv->olympic_message_level)
				printk(KERN_INFO "%s: Counter Overflow\n", dev->name);

			/* Issue READ.LOG command */

			writeb(SRB_READ_LOG, srb);
			writeb(0, srb+1);
			writeb(OLYMPIC_CLEAR_RET_CODE, srb+2);
			writeb(0, srb+3);
			writeb(0, srb+4);
			writeb(0, srb+5);

			olympic_priv->srb_queued = 2; /* Can't sleep, use srb_bh */

			writel(LISR_SRB_CMD, olympic_mmio+LISR_SUM);
		}

		if (lan_status_diff & LSC_SR_CO) {

			if (olympic_priv->olympic_message_level)
				printk(KERN_INFO "%s: Source routing counters overflow\n", dev->name);

			/* Issue a READ.SR.COUNTERS */

			writeb(SRB_READ_SR_COUNTERS, srb);
			writeb(0, srb+1);
			writeb(OLYMPIC_CLEAR_RET_CODE, srb+2);
			writeb(0, srb+3);

			olympic_priv->srb_queued = 2; /* Can't sleep, use srb_bh */

			writel(LISR_SRB_CMD, olympic_mmio+LISR_SUM);
		}

		olympic_priv->olympic_lan_status = lan_status;

	} /* Lan.change.status */
	else
		printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
}
static void olympic_asb_bh(struct net_device *dev)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *arb_block, *asb_block;

	arb_block = (olympic_priv->olympic_lap + olympic_priv->arb);
	asb_block = (olympic_priv->olympic_lap + olympic_priv->asb);

	if (olympic_priv->asb_queued == 1) {   /* Dropped through the first time */

		writeb(ASB_RECEIVE_DATA, asb_block); /* Receive data */
		writeb(OLYMPIC_CLEAR_RET_CODE, asb_block+2); /* Necessary ?? */
		writeb(readb(arb_block+6), asb_block+6); /* Must send the address back to the adapter */
		writeb(readb(arb_block+7), asb_block+7); /* To let it know we have dealt with the data */

		writel(LISR_ASB_REPLY | LISR_ASB_FREE_REQ, olympic_priv->olympic_mmio+LISR_SUM);
		olympic_priv->asb_queued = 2;

		return;
	}

	if (olympic_priv->asb_queued == 2) {
		switch (readb(asb_block+2)) {
		case 0x01:
			printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
			break;
		case 0x26:
			printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
			break;
		case 0xFF:
			/* Valid response, everything should be ok again */
			break;
		default:
			printk(KERN_WARNING "%s: Invalid return code in asb\n", dev->name);
			break;
		}
	}
	olympic_priv->asb_queued = 0;
}
static int olympic_change_mtu(struct net_device *dev, int mtu)
{
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u16 max_mtu;

	if (olympic_priv->olympic_ring_speed == 4)
		max_mtu = 4500;
	else
		max_mtu = 18000;

	if (mtu > max_mtu)
		return -EINVAL;
	if (mtu < 100)
		return -EINVAL;

	dev->mtu = mtu;
	/* pkt_buf_sz is consumed when the rx buffers are allocated in
	   olympic_open, so a new size takes effect on the next open. */
	olympic_priv->pkt_buf_sz = mtu + TR_HLEN;

	return 0;
}
static int olympic_proc_show(struct seq_file *m, void *v)
{
	struct net_device *dev = m->private;
	struct olympic_private *olympic_priv = netdev_priv(dev);
	u8 __iomem *oat = (olympic_priv->olympic_lap + olympic_priv->olympic_addr_table_addr);
	u8 __iomem *opt = (olympic_priv->olympic_lap + olympic_priv->olympic_parms_addr);
	u8 addr[6];
	u8 addr2[6];
	int i;

	seq_printf(m,
		   "IBM Pit/Pit-Phy/Olympic Chipset Token Ring Adapter %s\n", dev->name);
	seq_printf(m, "\n%6s: Adapter Address : Node Address : Functional Addr\n",
		   dev->name);

	for (i = 0; i < 6; i++)
		addr[i] = readb(oat+offsetof(struct olympic_adapter_addr_table, node_addr) + i);

	seq_printf(m, "%6s: %pM : %pM : %02x:%02x:%02x:%02x\n",
		   dev->name,
		   dev->dev_addr, addr,
		   readb(oat+offsetof(struct olympic_adapter_addr_table, func_addr)),
		   readb(oat+offsetof(struct olympic_adapter_addr_table, func_addr)+1),
		   readb(oat+offsetof(struct olympic_adapter_addr_table, func_addr)+2),
		   readb(oat+offsetof(struct olympic_adapter_addr_table, func_addr)+3));

	seq_printf(m, "\n%6s: Token Ring Parameters Table:\n", dev->name);

	seq_printf(m, "%6s: Physical Addr : Up Node Address : Poll Address : AccPri : Auth Src : Att Code :\n",
		   dev->name);

	for (i = 0; i < 6; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, up_node_addr) + i);
	for (i = 0; i < 6; i++)
		addr2[i] = readb(opt+offsetof(struct olympic_parameters_table, poll_addr) + i);

	seq_printf(m, "%6s: %02x:%02x:%02x:%02x : %pM : %pM : %04x : %04x : %04x :\n",
		   dev->name,
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+1),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+2),
		   readb(opt+offsetof(struct olympic_parameters_table, phys_addr)+3),
		   addr, addr2,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, acc_priority))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, auth_source_class))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, att_code))));

	seq_printf(m, "%6s: Source Address : Bcn T : Maj. V : Lan St : Lcl Rg : Mon Err : Frame Correl : \n",
		   dev->name);

	for (i = 0; i < 6; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, source_addr) + i);
	seq_printf(m, "%6s: %pM : %04x : %04x : %04x : %04x : %04x : %04x : \n",
		   dev->name, addr,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_type))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, major_vector))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, lan_status))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, local_ring))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, mon_error))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, frame_correl))));

	seq_printf(m, "%6s: Beacon Details : Tx : Rx : NAUN Node Address : NAUN Node Phys : \n",
		   dev->name);

	for (i = 0; i < 6; i++)
		addr[i] = readb(opt+offsetof(struct olympic_parameters_table, beacon_naun) + i);
	seq_printf(m, "%6s: : %02x : %02x : %pM : %02x:%02x:%02x:%02x : \n",
		   dev->name,
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_transmit))),
		   swab16(readw(opt+offsetof(struct olympic_parameters_table, beacon_receive))),
		   addr,
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+1),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+2),
		   readb(opt+offsetof(struct olympic_parameters_table, beacon_phys)+3));

	return 0;
}
static int olympic_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, olympic_proc_show, PDE(inode)->data);
}

static const struct file_operations olympic_proc_ops = {
	.open		= olympic_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void __devexit olympic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct olympic_private *olympic_priv = netdev_priv(dev);

	if (olympic_priv->olympic_network_monitor) {
		char proc_name[20];
		strcpy(proc_name, "olympic_");
		strcat(proc_name, dev->name);
		remove_proc_entry(proc_name, init_net.proc_net);
	}
	unregister_netdev(dev);
	iounmap(olympic_priv->olympic_mmio);
	iounmap(olympic_priv->olympic_lap);
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
}
static struct pci_driver olympic_driver = {
	.name		= "olympic",
	.id_table	= olympic_pci_tbl,
	.probe		= olympic_probe,
	.remove		= __devexit_p(olympic_remove_one),
};

static int __init olympic_pci_init(void)
{
	return pci_register_driver(&olympic_driver);
}

static void __exit olympic_pci_cleanup(void)
{
	pci_unregister_driver(&olympic_driver);
}

module_init(olympic_pci_init);
module_exit(olympic_pci_cleanup);

MODULE_LICENSE("GPL");