// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
 *
 * Copyright (C) 1998-2002 by Jes Sorensen, <jes@wildopensource.com>.
 *
 * Thanks to Essential Communication for providing us with hardware
 * and very comprehensive documentation without which I would not have
 * been able to write this driver. A special thank you to John Gibbon
 * for sorting out the legal issues, with the NDA, allowing the code to
 * be released under the GPL.
 *
 * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
 * stupid bugs in my code.
 *
 * Softnet support and various other patches from Val Henson of
 * ODS/Essential.
 *
 * PCI DMA mapping code partly based on work by Francois Romieu.
 */
#define RX_DMA_SKBUFF 1
#define PKT_COPY_THRESHOLD 512

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <asm/cache.h>
#include <asm/byteorder.h>

#include <linux/uaccess.h>

#define rr_if_busy(dev)     netif_queue_stopped(dev)
#define rr_if_running(dev)  netif_running(dev)

#include "rrunner.h"

#define RUN_AT(x) (jiffies + (x))

MODULE_AUTHOR("Jes Sorensen <jes@wildopensource.com>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");
static const char version[] =
"rrunner.c: v0.50 11/11/2002 Jes Sorensen (jes@wildopensource.com)\n";
static const struct net_device_ops rr_netdev_ops = {
	.ndo_open		= rr_open,
	.ndo_stop		= rr_close,
	.ndo_do_ioctl		= rr_ioctl,
	.ndo_start_xmit		= rr_start_xmit,
	.ndo_set_mac_address	= hippi_mac_addr,
};
/*
 * Implementation notes:
 *
 * The DMA engine only allows for DMA within physical 64KB chunks of
 * memory. The current approach of the driver (and stack) is to use
 * linear blocks of memory for the skbuffs. However, as the data block
 * is always the first part of the skb and skbs are 2^n aligned, we are
 * guaranteed to get the whole block within one 64KB aligned 64KB chunk.
 *
 * In the long term, relying on being able to allocate 64KB linear
 * chunks of memory is not feasible and the skb handling code and the
 * stack will need to know about I/O vectors or something similar.
 */
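
/*
 * Illustrative sketch only, not part of the original driver: one way to
 * express the 64KB constraint described above as an explicit test.  The
 * helper name and its use are hypothetical; the driver itself relies on
 * skb alignment (plus a sanity check in rr_init1()) rather than a check
 * like this.
 */
static inline bool rr_dma_crosses_64k(dma_addr_t addr, unsigned int len)
{
	/* True if the first and last byte fall in different 64KB chunks. */
	return len && ((addr >> 16) != ((addr + len - 1) >> 16));
}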
static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	static int version_disp;
	u8 pci_latency;
	struct rr_private *rrpriv;
	void *tmpptr;
	dma_addr_t ring_dma;
	int ret = -ENOMEM;

	dev = alloc_hippi_dev(sizeof(struct rr_private));

	ret = pci_enable_device(pdev);

	rrpriv = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = pci_request_regions(pdev, "rrunner");

	pci_set_drvdata(pdev, dev);

	rrpriv->pci_dev = pdev;

	spin_lock_init(&rrpriv->lock);

	dev->netdev_ops = &rr_netdev_ops;

	/* display version info if adapter is found */
	if (!version_disp) {
		/* set display flag to TRUE so that */
		/* we only display this string ONCE */
		version_disp = 1;
		printk(version);
	}

	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
	if (pci_latency <= 0x58){
		pci_latency = 0x58;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
	}

	pci_set_master(pdev);

	printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
	       "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
	       (unsigned long long)pci_resource_start(pdev, 0),
	       pdev->irq, pci_latency);

	/*
	 * Remap the MMIO regs into kernel space.
	 */
	rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
	if (!rrpriv->regs) {
		printk(KERN_ERR "%s: Unable to map I/O register, "
		       "RoadRunner will be disabled.\n", dev->name);
	}

	tmpptr = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
				    GFP_KERNEL);
	rrpriv->tx_ring = tmpptr;
	rrpriv->tx_ring_dma = ring_dma;

	tmpptr = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
				    GFP_KERNEL);
	rrpriv->rx_ring = tmpptr;
	rrpriv->rx_ring_dma = ring_dma;

	tmpptr = dma_alloc_coherent(&pdev->dev, EVT_RING_SIZE, &ring_dma,
				    GFP_KERNEL);
	rrpriv->evt_ring = tmpptr;
	rrpriv->evt_ring_dma = ring_dma;

	/*
	 * Don't access any register before this point!
	 */
	writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
	       &rrpriv->regs->HostCtrl);

	/*
	 * Need to add a case for little-endian 64-bit hosts here.
	 */
	ret = register_netdev(dev);

	if (rrpriv->evt_ring)
		dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rrpriv->evt_ring,
				  rrpriv->evt_ring_dma);

	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rrpriv->rx_ring,
			  rrpriv->rx_ring_dma);

	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rrpriv->tx_ring,
			  rrpriv->tx_ring_dma);

	pci_iounmap(pdev, rrpriv->regs);

	pci_release_regions(pdev);

	return ret;
}
static void rr_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rr_private *rr = netdev_priv(dev);

	if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
		printk(KERN_ERR "%s: trying to unload running NIC\n",
		       dev->name);
		writel(HALT_NIC, &rr->regs->HostCtrl);
	}

	unregister_netdev(dev);
	dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rr->evt_ring,
			  rr->evt_ring_dma);
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rr->rx_ring,
			  rr->rx_ring_dma);
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rr->tx_ring,
			  rr->tx_ring_dma);
	pci_iounmap(pdev, rr->regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
/*
 * Commands are considered to be slow, thus there is no reason to
 * optimize this.
 */
static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
{
	struct rr_regs __iomem *regs;
	u32 idx;

	regs = rrpriv->regs;

	/*
	 * This is temporary - it will go away in the final version.
	 * We probably also want to make this function inline.
	 */
	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("issuing command for halted NIC, code 0x%x, "
		       "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
		if (readl(&regs->Mode) & FATAL_ERR)
			printk("error codes Fail1 %02x, Fail2 %02x\n",
			       readl(&regs->Fail1), readl(&regs->Fail2));
	}

	idx = rrpriv->info->cmd_ctrl.pi;

	writel(*(u32 *)(cmd), &regs->CmdRing[idx]);

	idx = (idx - 1) % CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.pi = idx;

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error code %02x\n", readl(&regs->Fail1));
}
/*
 * Reset the board in a sensible manner. The NIC is already halted
 * when we get here and a spin-lock is held.
 */
static int rr_reset(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 start_pc;
	int i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rr_load_firmware(dev);

	writel(0x01000000, &regs->TX_state);
	writel(0xff800000, &regs->RX_state);
	writel(0, &regs->AssistState);
	writel(CLEAR_INTA, &regs->LocalCtrl);
	writel(0x01, &regs->BrkPt);
	writel(0, &regs->Timer);
	writel(0, &regs->TimerRef);
	writel(RESET_DMA, &regs->DmaReadState);
	writel(RESET_DMA, &regs->DmaWriteState);
	writel(0, &regs->DmaWriteHostHi);
	writel(0, &regs->DmaWriteHostLo);
	writel(0, &regs->DmaReadHostHi);
	writel(0, &regs->DmaReadHostLo);
	writel(0, &regs->DmaReadLen);
	writel(0, &regs->DmaWriteLen);
	writel(0, &regs->DmaWriteLcl);
	writel(0, &regs->DmaWriteIPchecksum);
	writel(0, &regs->DmaReadLcl);
	writel(0, &regs->DmaReadIPchecksum);
	writel(0, &regs->PciState);
#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
	writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
#elif (BITS_PER_LONG == 64)
	writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
#else
	writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
#endif

	/*
	 * Don't worry, this is just black magic.
	 */
	writel(0xdf000, &regs->RxBase);
	writel(0xdf000, &regs->RxPrd);
	writel(0xdf000, &regs->RxCon);
	writel(0xce000, &regs->TxBase);
	writel(0xce000, &regs->TxPrd);
	writel(0xce000, &regs->TxCon);
	writel(0, &regs->RxIndPro);
	writel(0, &regs->RxIndCon);
	writel(0, &regs->RxIndRef);
	writel(0, &regs->TxIndPro);
	writel(0, &regs->TxIndCon);
	writel(0, &regs->TxIndRef);
	writel(0xcc000, &regs->pad10[0]);
	writel(0, &regs->DrCmndPro);
	writel(0, &regs->DrCmndCon);
	writel(0, &regs->DwCmndPro);
	writel(0, &regs->DwCmndCon);
	writel(0, &regs->DwCmndRef);
	writel(0, &regs->DrDataPro);
	writel(0, &regs->DrDataCon);
	writel(0, &regs->DrDataRef);
	writel(0, &regs->DwDataPro);
	writel(0, &regs->DwDataCon);
	writel(0, &regs->DwDataRef);

	writel(0xffffffff, &regs->MbEvent);
	writel(0, &regs->Event);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);

	rrpriv->info->evt_ctrl.pi = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	/*
	 * Why 32 ? is this not cache line size dependent?
	 */
	writel(RBURST_64|WBURST_64, &regs->PciState);

	start_pc = rr_read_eeprom_word(rrpriv,
			offsetof(struct eeprom, rncd_info.FwStart));

	printk("%s: Executing firmware at address 0x%06x\n",
	       dev->name, start_pc);

	writel(start_pc + 0x800, &regs->Pc);

	writel(start_pc, &regs->Pc);

	return 0;
}
/*
 * Read a string from the EEPROM.
 */
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
				   unsigned long offset,
				   unsigned char *buf,
				   unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, host, i;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);
	host = readl(&regs->HostCtrl);
	writel(host | HALT_NIC, &regs->HostCtrl);

	for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
	}

	writel(host, &regs->HostCtrl);
	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);

	return length;
}
/*
 * Shortcut to read one word (4 bytes) out of the EEPROM and convert
 * it to our CPU byte-order.
 */
static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
			       unsigned long offset)
{
	__be32 word;

	if ((rr_read_eeprom(rrpriv, offset,
			    (unsigned char *)&word, 4) == 4))
		return be32_to_cpu(word);
	return 0;
}
/*
 * Write a string to the EEPROM.
 *
 * This is only called when the firmware is not running.
 */
static unsigned int write_eeprom(struct rr_private *rrpriv,
				 unsigned long offset,
				 unsigned char *buf,
				 unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, data, i, j, ready, error = 0;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);

	for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		data = buf[i] << 24;
		/*
		 * Only try to write the data if it is not the same
		 * value already.
		 */
		if ((readl(&regs->WinData) & 0xff000000) != data){
			writel(data, &regs->WinData);

			ready = 0;
			j = 0;

			while (!ready){
				udelay(20);
				if ((readl(&regs->WinData) & 0xff000000) ==
				    data)
					ready = 1;
				if (j++ > 5000){
					printk("data mismatch: %08x, "
					       "WinData %08x\n", data,
					       readl(&regs->WinData));
					ready = 1;
					error = 1;
				}
			}
		}
	}

	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);

	return error;
}
static int rr_init(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 sram_size, rev;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rev = readl(&regs->FwRev);
	rrpriv->fw_rev = rev;
	if (rev > 0x00020024)
		printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	else if (rev >= 0x00020000) {
		printk(" Firmware revision: %i.%i.%i (2.0.37 or "
		       "later is recommended)\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	} else {
		printk(" Firmware revision too old: %i.%i.%i, please "
		       "upgrade to 2.0.37 or later.\n",
		       (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
	}

	printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng));

	/*
	 * Read the hardware address from the eeprom. The HW address
	 * is not really necessary for HIPPI but awfully convenient.
	 * The pointer arithmetic to put it in dev_addr is ugly, but
	 * Donald Becker does it this way for the GigE version of this
	 * card and it's shorter and more portable than any
	 * other method I've seen. -VAL
	 */

	*(__be16 *)(dev->dev_addr) =
	  htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
	*(__be32 *)(dev->dev_addr+2) =
	  htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));

	printk(" MAC: %pM\n", dev->dev_addr);

	sram_size = rr_read_eeprom_word(rrpriv, 8);
	printk(" SRAM size 0x%06x\n", sram_size);

	return 0;
}
static int rr_init1(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	unsigned long myjif, flags;
	struct cmd cmd;
	u32 hostctrl;
	int ecode = 0;
	short i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	spin_lock_irqsave(&rrpriv->lock, flags);

	hostctrl = readl(&regs->HostCtrl);
	writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);

	if (hostctrl & PARITY_ERR){
		printk("%s: Parity error halting NIC - this is serious!\n",
		       dev->name);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		ecode = -EFAULT;
		goto error;
	}

	set_rxaddr(regs, rrpriv->rx_ctrl_dma);
	set_infoaddr(regs, rrpriv->info_dma);

	rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
	rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
	rrpriv->info->evt_ctrl.mode = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);

	rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
	rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.mode = 0;
	rrpriv->info->cmd_ctrl.pi = 15;

	for (i = 0; i < CMD_RING_ENTRIES; i++) {
		writel(0, &regs->CmdRing[i]);
	}

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		rrpriv->tx_ring[i].size = 0;
		set_rraddr(&rrpriv->tx_ring[i].addr, 0);
		rrpriv->tx_skbuff[i] = NULL;
	}
	rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
	rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
	rrpriv->info->tx_ctrl.mode = 0;
	rrpriv->info->tx_ctrl.pi = 0;
	set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);

	/*
	 * Set dirty_tx before we start receiving interrupts, otherwise
	 * the interrupt handler might think it is supposed to process
	 * tx ints before we are up and running, which may cause a null
	 * pointer access in the int handler.
	 */
	rrpriv->tx_full = 0;
	rrpriv->cur_rx = 0;
	rrpriv->dirty_rx = rrpriv->dirty_tx = 0;

	writel(0x5000, &regs->ConRetry);
	writel(0x100, &regs->ConRetryTmr);
	writel(0x500000, &regs->ConTmout);
	writel(0x60, &regs->IntrTmr);
	writel(0x500000, &regs->TxDataMvTimeout);
	writel(0x200000, &regs->RxDataMvTimeout);
	writel(0x80, &regs->WriteDmaThresh);
	writel(0x80, &regs->ReadDmaThresh);

	rrpriv->fw_running = 0;

	hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
	writel(hostctrl, &regs->HostCtrl);

	spin_unlock_irqrestore(&rrpriv->lock, flags);

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb;
		dma_addr_t addr;

		rrpriv->rx_ring[i].mode = 0;
		skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "%s: Unable to allocate memory "
			       "for receive ring - halting NIC\n", dev->name);
			ecode = -ENOMEM;
			goto error;
		}
		rrpriv->rx_skbuff[i] = skb;
		addr = dma_map_single(&rrpriv->pci_dev->dev, skb->data,
				      dev->mtu + HIPPI_HLEN, DMA_FROM_DEVICE);
		/*
		 * Sanity test to see if we conflict with the DMA
		 * limitations of the Roadrunner.
		 */
		if ((((unsigned long)skb->data) & 0xfff) > ~65320)
			printk("skb alloc error\n");

		set_rraddr(&rrpriv->rx_ring[i].addr, addr);
		rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
	}

	rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
	rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
	rrpriv->rx_ctrl[4].mode = 8;
	rrpriv->rx_ctrl[4].pi = 0;
	set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);

	/*
	 * Now start the FirmWare.
	 */
	cmd.code = C_START_FW;
	cmd.ring = 0;
	cmd.index = 0;

	rr_issue_cmd(rrpriv, &cmd);

	/*
	 * Give the FirmWare time to chew on the `get running' command.
	 */
	myjif = jiffies + 5 * HZ;
	while (time_before(jiffies, myjif) && !rrpriv->fw_running)
		cpu_relax();

	netif_start_queue(dev);

	return ecode;

 error:
	/*
	 * We might have gotten here because we are out of memory,
	 * make sure we release everything we allocated before failing
	 */
	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->rx_skbuff[i];

		if (skb) {
			dma_unmap_single(&rrpriv->pci_dev->dev,
					 rrpriv->rx_ring[i].addr.addrlo,
					 dev->mtu + HIPPI_HLEN,
					 DMA_FROM_DEVICE);
			rrpriv->rx_ring[i].size = 0;
			set_rraddr(&rrpriv->rx_ring[i].addr, 0);
			dev_kfree_skb(skb);
			rrpriv->rx_skbuff[i] = NULL;
		}
	}
	return ecode;
}
/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 tmp;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	while (prodidx != eidx){
		switch (rrpriv->evt_ring[eidx].code){

			tmp = readl(&regs->FwRev);
			printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
			       "up and running\n", dev->name,
			       (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
			rrpriv->fw_running = 1;
			writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);

			printk(KERN_INFO "%s: Optical link ON\n", dev->name);

			printk(KERN_INFO "%s: Optical link OFF\n", dev->name);

			printk(KERN_WARNING "%s: RX data not moving\n",
			       dev->name);

			printk(KERN_INFO "%s: The watchdog is here to see "
			       "us\n", dev->name);

			printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_ERR "%s: Host software error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_WARNING "%s: Connection rejected\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;

			printk(KERN_WARNING "%s: Connection timeout\n",
			       dev->name);

			printk(KERN_WARNING "%s: HIPPI disconnect error\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;

			printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_WARNING "%s: Transmitter idle\n",
			       dev->name);

			printk(KERN_WARNING "%s: Link lost during transmit\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_ERR "%s: Invalid send ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_ERR "%s: Invalid send buffer address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_ERR "%s: Invalid descriptor address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_INFO "%s: Receive ring full\n", dev->name);

			printk(KERN_WARNING "%s: Receive parity error\n",
			       dev->name);

			printk(KERN_WARNING "%s: Receive LLRC error\n",
			       dev->name);

			printk(KERN_WARNING "%s: Receive packet length "
			       "error\n", dev->name);

			printk(KERN_WARNING "%s: Data checksum error\n",
			       dev->name);

			printk(KERN_WARNING "%s: Unexpected short burst "
			       "error\n", dev->name);

			printk(KERN_WARNING "%s: Recv. state transition"
			       " error\n", dev->name);

			printk(KERN_WARNING "%s: Unexpected data error\n",
			       dev->name);

			printk(KERN_WARNING "%s: Link lost error\n",
			       dev->name);

			printk(KERN_WARNING "%s: Framing Error\n",
			       dev->name);

			printk(KERN_WARNING "%s: Flag sync. lost during "
			       "packet\n", dev->name);

			printk(KERN_ERR "%s: Invalid receive buffer "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_ERR "%s: Invalid receive descriptor "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			printk(KERN_ERR "%s: Invalid ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);

			/* Label packet to be dropped.
			 * Actual dropping occurs in rx
			 *
			 * The index of packet we get to drop is
			 * the index of the packet following
			 * the bad packet. -kbf
			 */
			{
				u16 index = rrpriv->evt_ring[eidx].index;
				index = (index + (RX_RING_ENTRIES - 1)) %
					RX_RING_ENTRIES;
				rrpriv->rx_ring[index].mode |=
					(PACKET_BAD | PACKET_END);
			}

			printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
			       dev->name, rrpriv->evt_ring[eidx].code);
		}

		eidx = (eidx + 1) % EVT_RING_ENTRIES;
	}

	rrpriv->info->evt_ctrl.pi = eidx;

	return eidx;
}
static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 pkt_len;
	struct rx_desc *desc;

	do {
		desc = &(rrpriv->rx_ring[index]);
		pkt_len = desc->size;

		printk("index %i, rxlimit %i\n", index, rxlimit);
		printk("len %x, mode %x\n", pkt_len, desc->mode);

		if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
			dev->stats.rx_dropped++;
			goto defer;
		}

		if (pkt_len > 0){
			struct sk_buff *skb, *rx_skb;

			rx_skb = rrpriv->rx_skbuff[index];

			if (pkt_len < PKT_COPY_THRESHOLD) {
				skb = alloc_skb(pkt_len, GFP_ATOMIC);
				if (skb == NULL){
					printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
					dev->stats.rx_dropped++;
					goto defer;
				} else {
					dma_sync_single_for_cpu(&rrpriv->pci_dev->dev,
								desc->addr.addrlo,
								pkt_len,
								DMA_FROM_DEVICE);

					skb_put_data(skb, rx_skb->data,
						     pkt_len);

					dma_sync_single_for_device(&rrpriv->pci_dev->dev,
								   desc->addr.addrlo,
								   pkt_len,
								   DMA_FROM_DEVICE);
				}
			} else {
				struct sk_buff *newskb;

				newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
						   GFP_ATOMIC);
				if (newskb){
					dma_addr_t addr;

					dma_unmap_single(&rrpriv->pci_dev->dev,
							 desc->addr.addrlo,
							 dev->mtu + HIPPI_HLEN,
							 DMA_FROM_DEVICE);
					skb = rx_skb;
					skb_put(skb, pkt_len);
					rrpriv->rx_skbuff[index] = newskb;
					addr = dma_map_single(&rrpriv->pci_dev->dev,
							      newskb->data,
							      dev->mtu + HIPPI_HLEN,
							      DMA_FROM_DEVICE);
					set_rraddr(&desc->addr, addr);
				} else {
					printk("%s: Out of memory, deferring "
					       "packet\n", dev->name);
					dev->stats.rx_dropped++;
					goto defer;
				}
			}
			skb->protocol = hippi_type_trans(skb, dev);

			netif_rx(skb);		/* send it up */

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
	defer:
		desc->size = dev->mtu + HIPPI_HLEN;

		if ((index & 7) == 7)
			writel(index, &regs->IpRxPi);

		index = (index + 1) % RX_RING_ENTRIES;
	} while(index != rxlimit);

	rrpriv->cur_rx = index;
}
static irqreturn_t rr_interrupt(int irq, void *dev_id)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (!(readl(&regs->HostCtrl) & RR_INT))
		return IRQ_NONE;

	spin_lock(&rrpriv->lock);

	prodidx = readl(&regs->EvtPrd);
	txcsmr = (prodidx >> 8) & 0xff;
	rxlimit = (prodidx >> 16) & 0xff;
	prodidx &= 0xff;

	printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
	       prodidx, rrpriv->info->evt_ctrl.pi);

	/*
	 * Order here is important.  We must handle events
	 * before doing anything else in order to catch
	 * such things as LLRC errors, etc -kbf
	 */

	eidx = rrpriv->info->evt_ctrl.pi;
	if (prodidx != eidx)
		eidx = rr_handle_event(dev, prodidx, eidx);

	rxindex = rrpriv->cur_rx;
	if (rxindex != rxlimit)
		rx_int(dev, rxlimit, rxindex);

	txcon = rrpriv->dirty_tx;
	if (txcsmr != txcon) {
		do {
			/* Due to occasional firmware TX producer/consumer out
			 * of sync. error need to check entry in ring -kbf
			 */
			if(rrpriv->tx_skbuff[txcon]){
				struct tx_desc *desc;
				struct sk_buff *skb;

				desc = &(rrpriv->tx_ring[txcon]);
				skb = rrpriv->tx_skbuff[txcon];

				dev->stats.tx_packets++;
				dev->stats.tx_bytes += skb->len;

				dma_unmap_single(&rrpriv->pci_dev->dev,
						 desc->addr.addrlo, skb->len,
						 DMA_TO_DEVICE);
				dev_kfree_skb_irq(skb);

				rrpriv->tx_skbuff[txcon] = NULL;
				set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
			}
			txcon = (txcon + 1) % TX_RING_ENTRIES;
		} while (txcsmr != txcon);

		rrpriv->dirty_tx = txcon;
		if (rrpriv->tx_full && rr_if_busy(dev) &&
		    (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
		     != rrpriv->dirty_tx)){
			rrpriv->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	eidx |= ((txcsmr << 8) | (rxlimit << 16));
	writel(eidx, &regs->EvtCon);

	spin_unlock(&rrpriv->lock);

	return IRQ_HANDLED;
}
static inline void rr_raz_tx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int i;

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->tx_skbuff[i];

		if (skb) {
			struct tx_desc *desc = &(rrpriv->tx_ring[i]);

			dma_unmap_single(&rrpriv->pci_dev->dev,
					 desc->addr.addrlo, skb->len,
					 DMA_TO_DEVICE);
			set_rraddr(&desc->addr, 0);
			dev_kfree_skb(skb);
			rrpriv->tx_skbuff[i] = NULL;
		}
	}
}
static inline void rr_raz_rx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->rx_skbuff[i];

		if (skb) {
			struct rx_desc *desc = &(rrpriv->rx_ring[i]);

			dma_unmap_single(&rrpriv->pci_dev->dev,
					 desc->addr.addrlo,
					 dev->mtu + HIPPI_HLEN,
					 DMA_FROM_DEVICE);
			set_rraddr(&desc->addr, 0);
			dev_kfree_skb(skb);
			rrpriv->rx_skbuff[i] = NULL;
		}
	}
}
static void rr_timer(struct timer_list *t)
{
	struct rr_private *rrpriv = from_timer(rrpriv, t, timer);
	struct net_device *dev = pci_get_drvdata(rrpriv->pci_dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	unsigned long flags;

	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("%s: Restarting nic\n", dev->name);
		memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
		memset(rrpriv->info, 0, sizeof(struct rr_info));

		rr_raz_tx(rrpriv, dev);
		rr_raz_rx(rrpriv, dev);

		if (rr_init1(dev)) {
			spin_lock_irqsave(&rrpriv->lock, flags);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			spin_unlock_irqrestore(&rrpriv->lock, flags);
		}
	}
	rrpriv->timer.expires = RUN_AT(5*HZ);
	add_timer(&rrpriv->timer);
}
static int rr_open(struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct pci_dev *pdev = rrpriv->pci_dev;
	struct rr_regs __iomem *regs;
	int ecode = 0;
	unsigned long flags;
	dma_addr_t dma_addr;

	regs = rrpriv->regs;

	if (rrpriv->fw_rev < 0x00020000) {
		printk(KERN_WARNING "%s: trying to configure device with "
		       "obsolete firmware\n", dev->name);
		ecode = -EBUSY;
		goto error;
	}

	rrpriv->rx_ctrl = dma_alloc_coherent(&pdev->dev,
					     256 * sizeof(struct ring_ctrl),
					     &dma_addr, GFP_KERNEL);
	if (!rrpriv->rx_ctrl) {
		ecode = -ENOMEM;
		goto error;
	}
	rrpriv->rx_ctrl_dma = dma_addr;

	rrpriv->info = dma_alloc_coherent(&pdev->dev, sizeof(struct rr_info),
					  &dma_addr, GFP_KERNEL);
	if (!rrpriv->info) {
		ecode = -ENOMEM;
		goto error;
	}
	rrpriv->info_dma = dma_addr;

	spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
	readl(&regs->HostCtrl);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       dev->name, pdev->irq);
		ecode = -EAGAIN;
		goto error;
	}

	if ((ecode = rr_init1(dev)))
		goto error;

	/* Set the timer to switch to check for link beat and perhaps switch
	   to an alternate media type. */
	timer_setup(&rrpriv->timer, rr_timer, 0);
	rrpriv->timer.expires = RUN_AT(5*HZ);		/* 5 sec. watchdog */
	add_timer(&rrpriv->timer);

	netif_start_queue(dev);

	return ecode;

 error:
	spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	if (rrpriv->info) {
		dma_free_coherent(&pdev->dev, sizeof(struct rr_info),
				  rrpriv->info, rrpriv->info_dma);
		rrpriv->info = NULL;
	}
	if (rrpriv->rx_ctrl) {
		dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
				  rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
		rrpriv->rx_ctrl = NULL;
	}

	netif_stop_queue(dev);

	return ecode;
}
static void rr_dump(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 index, cons;
	short i;
	int len;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	printk("%s: dumping NIC TX rings\n", dev->name);

	printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
	       readl(&regs->RxPrd), readl(&regs->TxPrd),
	       readl(&regs->EvtPrd), readl(&regs->TxPi),
	       rrpriv->info->tx_ctrl.pi);

	printk("Error code 0x%x\n", readl(&regs->Fail1));

	index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES;
	cons = rrpriv->dirty_tx;
	printk("TX ring index %i, TX consumer %i\n",
	       index, cons);

	if (rrpriv->tx_skbuff[index]){
		len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
		printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
		for (i = 0; i < len; i++){
			printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
		}
	}

	if (rrpriv->tx_skbuff[cons]){
		len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
		printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
		printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %p, truesize 0x%x\n",
		       rrpriv->tx_ring[cons].mode,
		       rrpriv->tx_ring[cons].size,
		       (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
		       rrpriv->tx_skbuff[cons]->data,
		       (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
		for (i = 0; i < len; i++){
			printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
		}
	}

	printk("dumping TX ring info:\n");
	for (i = 0; i < TX_RING_ENTRIES; i++)
		printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
		       rrpriv->tx_ring[i].mode,
		       rrpriv->tx_ring[i].size,
		       (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);
}
static int rr_close(struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	struct pci_dev *pdev = rrpriv->pci_dev;
	unsigned long flags;
	u32 tmp;
	short i;

	netif_stop_queue(dev);

	/*
	 * Lock to make sure we are not cleaning up while another CPU
	 * is handling interrupts.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	tmp = readl(&regs->HostCtrl);
	if (tmp & NIC_HALTED){
		printk("%s: NIC already halted\n", dev->name);
	} else {
		tmp |= HALT_NIC | RR_CLEAR_INT;
		writel(tmp, &regs->HostCtrl);
		readl(&regs->HostCtrl);
	}

	rrpriv->fw_running = 0;

	spin_unlock_irqrestore(&rrpriv->lock, flags);
	del_timer_sync(&rrpriv->timer);
	spin_lock_irqsave(&rrpriv->lock, flags);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	rrpriv->info->tx_ctrl.entries = 0;
	rrpriv->info->cmd_ctrl.pi = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	rrpriv->rx_ctrl[4].entries = 0;

	rr_raz_tx(rrpriv, dev);
	rr_raz_rx(rrpriv, dev);

	dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
			  rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
	rrpriv->rx_ctrl = NULL;

	dma_free_coherent(&pdev->dev, sizeof(struct rr_info), rrpriv->info,
			  rrpriv->info_dma);
	rrpriv->info = NULL;

	spin_unlock_irqrestore(&rrpriv->lock, flags);
	free_irq(pdev->irq, dev);

	return 0;
}
static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
	struct ring_ctrl *txctrl;
	unsigned long flags;
	u32 index, len = skb->len;
	u32 *ifield;
	struct sk_buff *new_skb;

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error codes Fail1 %02x, Fail2 %02x\n",
		       readl(&regs->Fail1), readl(&regs->Fail2));

	/*
	 * We probably need to deal with tbusy here to prevent overruns.
	 */
	if (skb_headroom(skb) < 8){
		printk("incoming skb too small - reallocating\n");
		if (!(new_skb = dev_alloc_skb(len + 8))) {
			dev_kfree_skb(skb);
			netif_wake_queue(dev);
			return NETDEV_TX_OK;
		}
		skb_reserve(new_skb, 8);
		skb_put(new_skb, len);
		skb_copy_from_linear_data(skb, new_skb->data, len);
		dev_kfree_skb(skb);
		skb = new_skb;
	}

	ifield = skb_push(skb, 8);

	ifield[0] = 0;
	ifield[1] = hcb->ifield;

	/*
	 * We don't need the lock before we are actually going to start
	 * fiddling with the control blocks.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	txctrl = &rrpriv->info->tx_ctrl;

	index = txctrl->pi;

	rrpriv->tx_skbuff[index] = skb;
	set_rraddr(&rrpriv->tx_ring[index].addr,
		   dma_map_single(&rrpriv->pci_dev->dev, skb->data, len + 8, DMA_TO_DEVICE));
	rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
	rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
	txctrl->pi = (index + 1) % TX_RING_ENTRIES;

	writel(txctrl->pi, &regs->TxPi);

	if (txctrl->pi == rrpriv->dirty_tx){
		rrpriv->tx_full = 1;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&rrpriv->lock, flags);

	return NETDEV_TX_OK;
}
/*
 * Read the firmware out of the EEPROM and put it into the SRAM
 * (or from user space - later)
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock held.
 */
static int rr_load_firmware(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	size_t eptr, segptr;
	int i, j;
	u32 localctrl, sptr, len, tmp;
	u32 p2len, p2size, nr_seg, revision, io, sram_size;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
		printk("%s: Trying to load firmware to a running NIC.\n",
		       dev->name);
		return -EBUSY;
	}

	localctrl = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);

	writel(0, &regs->EvtPrd);
	writel(0, &regs->RxPrd);
	writel(0, &regs->TxPrd);

	/*
	 * First wipe the entire SRAM, otherwise we might run into all
	 * kinds of trouble ... sigh, this took almost all afternoon
	 * to track down ;-(
	 */
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	sram_size = rr_read_eeprom_word(rrpriv, 8);

	for (i = 200; i < sram_size / 4; i++){
		writel(i * 4, &regs->WinBase);
		writel(0, &regs->WinData);
	}
	writel(io, &regs->ExtIo);

	eptr = rr_read_eeprom_word(rrpriv,
		       offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
	eptr = ((eptr & 0x1fffff) >> 3);

	p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
	p2len = (p2len << 2);
	p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
	p2size = ((p2size & 0x1fffff) >> 3);

	if ((eptr < p2size) || (eptr > (p2size + p2len))){
		printk("%s: eptr is invalid\n", dev->name);
		goto out;
	}

	revision = rr_read_eeprom_word(rrpriv,
			offsetof(struct eeprom, manf.HeaderFmt));

	if (revision != 1){
		printk("%s: invalid firmware format (%i)\n",
		       dev->name, revision);
		goto out;
	}

	nr_seg = rr_read_eeprom_word(rrpriv, eptr);
	eptr += 4;

	printk("%s: nr_seg %i\n", dev->name, nr_seg);

	for (i = 0; i < nr_seg; i++){
		sptr = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		len = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		segptr = rr_read_eeprom_word(rrpriv, eptr);
		segptr = ((segptr & 0x1fffff) >> 3);
		eptr += 4;

		printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
		       dev->name, i, sptr, len, segptr);

		for (j = 0; j < len; j++){
			tmp = rr_read_eeprom_word(rrpriv, segptr);
			writel(sptr, &regs->WinBase);
			writel(tmp, &regs->WinData);
			segptr += 4;
			sptr += 4;
		}
	}

 out:
	writel(localctrl, &regs->LocalCtrl);
	return 0;
}
static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rr_private *rrpriv;
	unsigned char *image, *oldimage;
	unsigned long flags;
	unsigned int i;
	int error = -EOPNOTSUPP;

	rrpriv = netdev_priv(dev);

	switch(cmd){
	case SIOCRRGFW:
		if (!capable(CAP_SYS_RAWIO)){
			return -EPERM;
		}

		image = kmalloc_array(EEPROM_WORDS, sizeof(u32), GFP_KERNEL);
		if (!image)
			return -ENOMEM;

		if (rrpriv->fw_running){
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto gf_out;
		}

		spin_lock_irqsave(&rrpriv->lock, flags);
		i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		if (i != EEPROM_BYTES){
			printk(KERN_ERR "%s: Error reading EEPROM\n",
			       dev->name);
			error = -EFAULT;
			goto gf_out;
		}
		error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
		if (error)
			error = -EFAULT;
	gf_out:
		kfree(image);
		return error;

	case SIOCRRPFW:
		if (!capable(CAP_SYS_RAWIO)){
			return -EPERM;
		}

		image = memdup_user(rq->ifr_data, EEPROM_BYTES);
		if (IS_ERR(image))
			return PTR_ERR(image);

		oldimage = kmalloc(EEPROM_BYTES, GFP_KERNEL);
		if (!oldimage) {
			kfree(image);
			return -ENOMEM;
		}

		if (rrpriv->fw_running){
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto wf_out;
		}

		printk("%s: Updating EEPROM firmware\n", dev->name);

		spin_lock_irqsave(&rrpriv->lock, flags);
		error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		if (error)
			printk(KERN_ERR "%s: Error writing EEPROM\n",
			       dev->name);

		i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);

		if (i != EEPROM_BYTES)
			printk(KERN_ERR "%s: Error reading back EEPROM "
			       "image\n", dev->name);

		error = memcmp(image, oldimage, EEPROM_BYTES);
		if (error){
			printk(KERN_ERR "%s: Error verifying EEPROM image\n",
			       dev->name);
			error = -EFAULT;
		}
	wf_out:
		kfree(oldimage);
		kfree(image);
		return error;

	case SIOCRRID:
		return put_user(0x52523032, (int __user *)rq->ifr_data);
	default:
		return error;
	}
}
static const struct pci_device_id rr_pci_tbl[] = {
	{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, rr_pci_tbl);

static struct pci_driver rr_driver = {
	.name		= "rrunner",
	.id_table	= rr_pci_tbl,
	.probe		= rr_init_one,
	.remove		= rr_remove_one,
};

module_pci_driver(rr_driver);