1 /*
2 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
4 * Copyright (C) 2003, 2007 IC Plus Corp
6 * Original Author:
8 * Craig Rich
9 * Sundance Technology, Inc.
10 * www.sundanceti.com
11 * craig_rich@sundanceti.com
13 * Current Maintainer:
15 * Sorbica Shieh.
16 * http://www.icplus.com.tw
17 * sorbica@icplus.com.tw
19 * Jesse Huang
20 * http://www.icplus.com.tw
21 * jesse@icplus.com.tw
23 #include <linux/crc32.h>
24 #include <linux/ethtool.h>
25 #include <linux/mii.h>
26 #include <linux/mutex.h>
28 #include <asm/div64.h>
30 #define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
31 #define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
32 #define IPG_RESET_MASK \
33 (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
34 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
35 IPG_AC_AUTO_INIT)
37 #define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg))
38 #define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg))
39 #define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg))
41 #define ipg_r32(reg) ioread32(ioaddr + (reg))
42 #define ipg_r16(reg) ioread16(ioaddr + (reg))
43 #define ipg_r8(reg) ioread8(ioaddr + (reg))
45 enum {
46 netdev_io_size = 128
49 #include "ipg.h"
50 #define DRV_NAME "ipg"
52 MODULE_AUTHOR("IC Plus Corp. 2003");
53 MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
54 MODULE_LICENSE("GPL");
57 * Defaults
59 #define IPG_MAX_RXFRAME_SIZE 0x0600
60 #define IPG_RXFRAG_SIZE 0x0600
61 #define IPG_RXSUPPORT_SIZE 0x0600
62 #define IPG_IS_JUMBO false
65 * Variable record -- index by leading revision/length
66 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
68 static unsigned short DefaultPhyParam[] = {
69 /* 11/12/03 IP1000A v1-3 rev=0x40 */
70 /*--------------------------------------------------------------------------
71 (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
72 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
73 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
74 --------------------------------------------------------------------------*/
75 /* 12/17/03 IP1000A v1-4 rev=0x40 */
76 (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
77 0x0000,
78 30, 0x005e, 9, 0x0700,
79 /* 01/09/04 IP1000A v1-5 rev=0x41 */
80 (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
81 0x0000,
82 30, 0x005e, 9, 0x0700,
83 0x0000
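/*
 * Illustrative sketch (not part of the original driver): how a record
 * header in DefaultPhyParam above is laid out.  The high byte holds the
 * PHY revision and the low byte the record length in bytes, so
 * (0x4000 | (07 * 4)) describes revision 0x40 with seven address/data
 * pairs (28 bytes).  ipg_set_phy_default_param() walks the table this way.
 */
static inline void ipg_example_decode_phy_record(unsigned short header,
						 unsigned char *revision,
						 unsigned short *length)
{
	*revision = header >> 8;	/* e.g. 0x40 for the IP1000A v1-4 record */
	*length = header & 0x00ff;	/* record length in bytes, e.g. 28 */
}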
86 static const char *ipg_brand_name[] = {
87 "IC PLUS IP1000 1000/100/10 based NIC",
88 "Sundance Technology ST2021 based NIC",
89 "Tamarack Microelectronics TC9020/9021 based NIC",
90 "Tamarack Microelectronics TC9020/9021 based NIC",
91 "D-Link NIC",
92 "D-Link NIC IP1000A"
95 static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
99 { PCI_VDEVICE(DLINK, 0x9021), 3 },
100 { PCI_VDEVICE(DLINK, 0x4000), 4 },
101 { PCI_VDEVICE(DLINK, 0x4020), 5 },
102 { 0, }
105 MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
107 static inline void __iomem *ipg_ioaddr(struct net_device *dev)
109 struct ipg_nic_private *sp = netdev_priv(dev);
110 return sp->ioaddr;
113 #ifdef IPG_DEBUG
114 static void ipg_dump_rfdlist(struct net_device *dev)
116 struct ipg_nic_private *sp = netdev_priv(dev);
117 void __iomem *ioaddr = sp->ioaddr;
118 unsigned int i;
119 u32 offset;
121 IPG_DEBUG_MSG("_dump_rfdlist\n");
123 printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current);
124 printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty);
125 printk(KERN_INFO "RFDList start address = %16.16lx\n",
126 (unsigned long) sp->rxd_map);
127 printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n",
128 ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
130 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
131 offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
132 printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i,
133 offset, (unsigned long) sp->rxd[i].next_desc);
134 offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
135 printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i,
136 offset, (unsigned long) sp->rxd[i].rfs);
137 offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
138 printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
139 offset, (unsigned long) sp->rxd[i].frag_info);
143 static void ipg_dump_tfdlist(struct net_device *dev)
145 struct ipg_nic_private *sp = netdev_priv(dev);
146 void __iomem *ioaddr = sp->ioaddr;
147 unsigned int i;
148 u32 offset;
150 IPG_DEBUG_MSG("_dump_tfdlist\n");
152 printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current);
153 printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty);
154 printk(KERN_INFO "TFDList start address = %16.16lx\n",
155 (unsigned long) sp->txd_map);
156 printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n",
157 ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
159 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
160 offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
161 printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i,
162 offset, (unsigned long) sp->txd[i].next_desc);
164 offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
165 printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i,
166 offset, (unsigned long) sp->txd[i].tfc);
167 offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
168 printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
169 offset, (unsigned long) sp->txd[i].frag_info);
172 #endif
174 static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
176 ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
177 ndelay(IPG_PC_PHYCTRLWAIT_NS);
180 static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
182 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
183 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
186 static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
188 phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
190 ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
193 static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
195 ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
196 phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
199 static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
201 u16 bit_data;
203 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
205 bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
207 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
209 return bit_data;
213 * Read a register from the Physical Layer device located
214 * on the IPG NIC, using the IPG PHYCTRL register.
216 static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
218 void __iomem *ioaddr = ipg_ioaddr(dev);
220 * The GMII management frame structure for a read is as follows:
222 * |Preamble|st|op|phyad|regad|ta| data |idle|
223 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
225 * <32 1s> = 32 consecutive logic 1 values
226 * A = bit of Physical Layer device address (MSB first)
227 * R = bit of register address (MSB first)
228 * z = High impedance state
229 * D = bit of read data (MSB first)
231 * Transmission order is 'Preamble' field first, bits transmitted
232 * left to right (first to last).
234 struct {
235 u32 field;
236 unsigned int len;
237 } p[] = {
238 { GMII_PREAMBLE, 32 }, /* Preamble */
239 { GMII_ST, 2 }, /* ST */
240 { GMII_READ, 2 }, /* OP */
241 { phy_id, 5 }, /* PHYAD */
242 { phy_reg, 5 }, /* REGAD */
243 { 0x0000, 2 }, /* TA */
244 { 0x0000, 16 }, /* DATA */
245 { 0x0000, 1 } /* IDLE */
247 unsigned int i, j;
248 u8 polarity, data;
250 polarity = ipg_r8(PHY_CTRL);
251 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
253 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
254 for (j = 0; j < 5; j++) {
255 for (i = 0; i < p[j].len; i++) {
256 /* For each variable length field, the MSB must be
257 * transmitted first. Rotate through the field bits,
258 * starting with the MSB, and move each bit into
259 * the 1st (2^1) bit position (this is the bit position
260 * corresponding to the MgmtData bit of the PhyCtrl
261 * register for the IPG).
263 * Example: ST = 01;
265 * First write a '0' to bit 1 of the PhyCtrl
266 * register, then write a '1' to bit 1 of the
267 * PhyCtrl register.
269 * To do this, right shift the MSB of ST by the value:
270 * [field length - 1 - #ST bits already written]
271 * then left shift this result by 1.
273 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
274 data &= IPG_PC_MGMTDATA;
275 data |= polarity | IPG_PC_MGMTDIR;
277 ipg_drive_phy_ctl_low_high(ioaddr, data);
281 send_three_state(ioaddr, polarity);
283 read_phy_bit(ioaddr, polarity);
286 * For a read cycle, the bits for the next two fields (TA and
287 * DATA) are driven by the PHY (the IPG reads these bits).
289 for (i = 0; i < p[6].len; i++) {
290 p[6].field |=
291 (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
294 send_three_state(ioaddr, polarity);
295 send_three_state(ioaddr, polarity);
296 send_three_state(ioaddr, polarity);
297 send_end(ioaddr, polarity);
299 /* Return the value of the DATA field. */
300 return p[6].field;
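/*
 * Worked example (illustrative only, not used by the driver): placing one
 * bit of a GMII frame field, MSB first, into bit 1 of the value written to
 * PHY_CTRL, exactly as mdio_read() above and mdio_write() below do.  For
 * the ST field (binary 01, length 2), step 0 yields (01 >> 1) << 1 = 0 and
 * step 1 yields (01 >> 0) << 1 = 2, i.e. MgmtData clear, then set.
 */
static inline u8 ipg_example_mgmtdata_bit(u32 field, unsigned int len,
					  unsigned int i)
{
	return ((field >> (len - 1 - i)) << 1) & IPG_PC_MGMTDATA;
}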
304 * Write to a register of the Physical Layer device located
305 * on the IPG NIC, using the IPG PHYCTRL register.
307 static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
309 void __iomem *ioaddr = ipg_ioaddr(dev);
311 * The GMII management frame structure for a write is as follows:
313 * |Preamble|st|op|phyad|regad|ta| data |idle|
314 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z |
316 * <32 1s> = 32 consecutive logic 1 values
317 * A = bit of Physical Layer device address (MSB first)
318 * R = bit of register address (MSB first)
319 * z = High impedance state
320 * D = bit of write data (MSB first)
322 * Transmission order is 'Preamble' field first, bits transmitted
323 * left to right (first to last).
325 struct {
326 u32 field;
327 unsigned int len;
328 } p[] = {
329 { GMII_PREAMBLE, 32 }, /* Preamble */
330 { GMII_ST, 2 }, /* ST */
331 { GMII_WRITE, 2 }, /* OP */
332 { phy_id, 5 }, /* PHYAD */
333 { phy_reg, 5 }, /* REGAD */
334 { 0x0002, 2 }, /* TA */
335 { val & 0xffff, 16 }, /* DATA */
336 { 0x0000, 1 } /* IDLE */
338 unsigned int i, j;
339 u8 polarity, data;
341 polarity = ipg_r8(PHY_CTRL);
342 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
344 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
345 for (j = 0; j < 7; j++) {
346 for (i = 0; i < p[j].len; i++) {
347 /* For each variable length field, the MSB must be
348 * transmitted first. Rotate through the field bits,
349 * starting with the MSB, and move each bit into
350 * the 1st (2^1) bit position (this is the bit position
351 * corresponding to the MgmtData bit of the PhyCtrl
352 * register for the IPG).
354 * Example: ST = 01;
356 * First write a '0' to bit 1 of the PhyCtrl
357 * register, then write a '1' to bit 1 of the
358 * PhyCtrl register.
360 * To do this, right shift the MSB of ST by the value:
361 * [field length - 1 - #ST bits already written]
362 * then left shift this result by 1.
364 data = (p[j].field >> (p[j].len - 1 - i)) << 1;
365 data &= IPG_PC_MGMTDATA;
366 data |= polarity | IPG_PC_MGMTDIR;
368 ipg_drive_phy_ctl_low_high(ioaddr, data);
372 /* The last cycle is a tri-state, so read from the PHY. */
373 for (j = 7; j < 8; j++) {
374 for (i = 0; i < p[j].len; i++) {
375 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
377 p[j].field |= ((ipg_r8(PHY_CTRL) &
378 IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
380 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
385 static void ipg_set_led_mode(struct net_device *dev)
387 struct ipg_nic_private *sp = netdev_priv(dev);
388 void __iomem *ioaddr = sp->ioaddr;
389 u32 mode;
391 mode = ipg_r32(ASIC_CTRL);
392 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
394 if ((sp->led_mode & 0x03) > 1)
395 mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */
397 if ((sp->led_mode & 0x01) == 1)
398 mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */
400 if ((sp->led_mode & 0x08) == 8)
401 mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */
403 ipg_w32(mode, ASIC_CTRL);
406 static void ipg_set_phy_set(struct net_device *dev)
408 struct ipg_nic_private *sp = netdev_priv(dev);
409 void __iomem *ioaddr = sp->ioaddr;
410 int physet;
412 physet = ipg_r8(PHY_SET);
413 physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
414 physet |= ((sp->led_mode & 0x70) >> 4);
415 ipg_w8(physet, PHY_SET);
418 static int ipg_reset(struct net_device *dev, u32 resetflags)
420 /* Assert functional resets via the IPG AsicCtrl
421 * register as specified by the 'resetflags' input
422 * parameter.
424 void __iomem *ioaddr = ipg_ioaddr(dev);
425 unsigned int timeout_count = 0;
427 IPG_DEBUG_MSG("_reset\n");
429 ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
431 /* Delay added to account for problem with 10Mbps reset. */
432 mdelay(IPG_AC_RESETWAIT);
434 while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
435 mdelay(IPG_AC_RESETWAIT);
436 if (++timeout_count > IPG_AC_RESET_TIMEOUT)
437 return -ETIME;
439 /* Set LED Mode in Asic Control */
440 ipg_set_led_mode(dev);
442 /* Set PHYSet Register Value */
443 ipg_set_phy_set(dev);
444 return 0;
447 /* Find the GMII PHY address. */
448 static int ipg_find_phyaddr(struct net_device *dev)
450 unsigned int phyaddr, i;
452 for (i = 0; i < 32; i++) {
453 u32 status;
455 /* Search for the correct PHY address among 32 possible. */
456 phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
458 /* 10/22/03 Grace: changed the check from GMII_PHY_STATUS to
459 GMII_PHY_ID1
462 status = mdio_read(dev, phyaddr, MII_BMSR);
464 if ((status != 0xFFFF) && (status != 0))
465 return phyaddr;
468 return 0x1f;
472 * Configure IPG based on result of IEEE 802.3 PHY
473 * auto-negotiation.
475 static int ipg_config_autoneg(struct net_device *dev)
477 struct ipg_nic_private *sp = netdev_priv(dev);
478 void __iomem *ioaddr = sp->ioaddr;
479 unsigned int txflowcontrol;
480 unsigned int rxflowcontrol;
481 unsigned int fullduplex;
482 u32 mac_ctrl_val;
483 u32 asicctrl;
484 u8 phyctrl;
486 IPG_DEBUG_MSG("_config_autoneg\n");
488 asicctrl = ipg_r32(ASIC_CTRL);
489 phyctrl = ipg_r8(PHY_CTRL);
490 mac_ctrl_val = ipg_r32(MAC_CTRL);
492 /* Set flags for use in resolving auto-negotiation, assuming
493 * non-1000Mbps, half duplex, no flow control.
495 fullduplex = 0;
496 txflowcontrol = 0;
497 rxflowcontrol = 0;
499 /* To accommodate a problem in 10Mbps operation,
500 * set a global flag if PHY running in 10Mbps mode.
502 sp->tenmbpsmode = 0;
504 printk(KERN_INFO "%s: Link speed = ", dev->name);
506 /* Determine actual speed of operation. */
507 switch (phyctrl & IPG_PC_LINK_SPEED) {
508 case IPG_PC_LINK_SPEED_10MBPS:
509 printk("10Mbps.\n");
510 printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",
511 dev->name);
512 sp->tenmbpsmode = 1;
513 break;
514 case IPG_PC_LINK_SPEED_100MBPS:
515 printk("100Mbps.\n");
516 break;
517 case IPG_PC_LINK_SPEED_1000MBPS:
518 printk("1000Mbps.\n");
519 break;
520 default:
521 printk("undefined!\n");
522 return 0;
525 if (phyctrl & IPG_PC_DUPLEX_STATUS) {
526 fullduplex = 1;
527 txflowcontrol = 1;
528 rxflowcontrol = 1;
531 /* Configure full duplex, and flow control. */
532 if (fullduplex == 1) {
533 /* Configure IPG for full duplex operation. */
534 printk(KERN_INFO "%s: setting full duplex, ", dev->name);
536 mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
538 if (txflowcontrol == 1) {
539 printk("TX flow control");
540 mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
541 } else {
542 printk("no TX flow control");
543 mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
546 if (rxflowcontrol == 1) {
547 printk(", RX flow control.");
548 mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
549 } else {
550 printk(", no RX flow control.");
551 mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
554 printk("\n");
555 } else {
556 /* Configure IPG for half duplex operation. */
557 printk(KERN_INFO "%s: setting half duplex, "
558 "no TX flow control, no RX flow control.\n", dev->name);
560 mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD &
561 ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
562 ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
564 ipg_w32(mac_ctrl_val, MAC_CTRL);
565 return 0;
568 /* Determine and configure multicast operation and set
569 * receive mode for IPG.
571 static void ipg_nic_set_multicast_list(struct net_device *dev)
573 void __iomem *ioaddr = ipg_ioaddr(dev);
574 struct dev_mc_list *mc_list_ptr;
575 unsigned int hashindex;
576 u32 hashtable[2];
577 u8 receivemode;
579 IPG_DEBUG_MSG("_nic_set_multicast_list\n");
581 receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
583 if (dev->flags & IFF_PROMISC) {
584 /* NIC to be configured in promiscuous mode. */
585 receivemode = IPG_RM_RECEIVEALLFRAMES;
586 } else if ((dev->flags & IFF_ALLMULTI) ||
587 ((dev->flags & IFF_MULTICAST) &&
588 (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
589 /* NIC to be configured to receive all multicast
590 * frames. */
591 receivemode |= IPG_RM_RECEIVEMULTICAST;
592 } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
593 /* NIC to be configured to receive selected
594 * multicast addresses. */
595 receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
598 /* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
599 * The IPG applies a cyclic-redundancy-check (the same CRC
600 * used to calculate the frame data FCS) to the destination
601 * address of all incoming multicast frames whose destination
602 * address has the multicast bit set. The least significant
603 * 6 bits of the CRC result are used as an addressing index
604 * into the hash table. If the value of the bit addressed by
605 * this index is a 1, the frame is passed to the host system.
608 /* Clear hashtable. */
609 hashtable[0] = 0x00000000;
610 hashtable[1] = 0x00000000;
612 /* Cycle through all multicast addresses to filter. */
613 for (mc_list_ptr = dev->mc_list;
614 mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) {
615 /* Calculate CRC result for each multicast address. */
616 hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
617 ETH_ALEN);
619 /* Use only the least significant 6 bits. */
620 hashindex = hashindex & 0x3F;
622 /* Within "hashtable", set bit number "hashindex"
623 * to a logic 1.
625 set_bit(hashindex, (void *)hashtable);
628 /* Write the value of the hashtable to the four 16-bit
629 * HASHTABLE registers of the IPG.
631 ipg_w32(hashtable[0], HASHTABLE_0);
632 ipg_w32(hashtable[1], HASHTABLE_1);
634 ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
636 IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
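/*
 * Minimal sketch (illustrative, mirrors ipg_nic_set_multicast_list() above):
 * mapping a single multicast address to its bit in the 64-bit hash table.
 * The CRC-32 of the destination address uses the same polynomial as the
 * frame FCS, and its least significant 6 bits select one of the 64 bits.
 */
static inline void ipg_example_hash_one_addr(const u8 *addr, u32 hashtable[2])
{
	unsigned int hashindex = crc32_le(0xffffffff, addr, ETH_ALEN) & 0x3f;

	set_bit(hashindex, (void *)hashtable);
}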
639 static int ipg_io_config(struct net_device *dev)
641 struct ipg_nic_private *sp = netdev_priv(dev);
642 void __iomem *ioaddr = ipg_ioaddr(dev);
643 u32 origmacctrl;
644 u32 restoremacctrl;
646 IPG_DEBUG_MSG("_io_config\n");
648 origmacctrl = ipg_r32(MAC_CTRL);
650 restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
652 /* Based on compilation option, determine if FCS is to be
653 * stripped on receive frames by IPG.
655 if (!IPG_STRIP_FCS_ON_RX)
656 restoremacctrl |= IPG_MC_RCV_FCS;
658 /* Determine if transmitter and/or receiver are
659 * enabled so we may restore MACCTRL correctly.
661 if (origmacctrl & IPG_MC_TX_ENABLED)
662 restoremacctrl |= IPG_MC_TX_ENABLE;
664 if (origmacctrl & IPG_MC_RX_ENABLED)
665 restoremacctrl |= IPG_MC_RX_ENABLE;
667 /* Transmitter and receiver must be disabled before setting
668 * IFSSelect.
670 ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
671 IPG_MC_RSVD_MASK, MAC_CTRL);
673 /* Now that transmitter and receiver are disabled, write
674 * to IFSSelect.
676 ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
678 /* Set RECEIVEMODE register. */
679 ipg_nic_set_multicast_list(dev);
681 ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
683 ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
684 ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
685 ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
686 ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
687 ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
688 ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
689 ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
690 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
691 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
692 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
693 ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
694 ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
696 /* IPG multi-frag frame bug workaround.
697 * Per silicon revision B3 errata.
699 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
701 /* IPG TX poll now bug workaround.
702 * Per silicon revision B3 errata.
704 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
706 /* IPG RX poll now bug workaround.
707 * Per silicon revision B3 errata.
709 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
711 /* Now restore MACCTRL to original setting. */
712 ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
714 /* Disable unused RMON statistics. */
715 ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
717 /* Disable unused MIB statistics. */
718 ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
719 IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
720 IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
721 IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
722 IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
723 IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
725 return 0;
729 * Create a receive buffer within system memory and update
730 * NIC private structure appropriately.
732 static int ipg_get_rxbuff(struct net_device *dev, int entry)
734 struct ipg_nic_private *sp = netdev_priv(dev);
735 struct ipg_rx *rxfd = sp->rxd + entry;
736 struct sk_buff *skb;
737 u64 rxfragsize;
739 IPG_DEBUG_MSG("_get_rxbuff\n");
741 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
742 if (!skb) {
743 sp->rx_buff[entry] = NULL;
744 return -ENOMEM;
747 /* Associate the receive buffer with the IPG NIC. */
748 skb->dev = dev;
750 /* Save the address of the sk_buff structure. */
751 sp->rx_buff[entry] = skb;
753 rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
754 sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
756 /* Set the RFD fragment length. */
757 rxfragsize = sp->rxfrag_size;
758 rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
760 return 0;
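/*
 * Illustrative sketch of the RFD fragment descriptor layout built above:
 * the bus address of the receive buffer occupies the low 48 bits of
 * frag_info and the fragment length the upper 16 bits (IPG_RFI_FRAGLEN),
 * which is why the unmap paths mask with ~IPG_RFI_FRAGLEN to recover the
 * address.
 */
static inline __le64 ipg_example_pack_frag_info(dma_addr_t addr, u64 len)
{
	return cpu_to_le64(((u64) addr & ~IPG_RFI_FRAGLEN) |
			   ((len << 48) & IPG_RFI_FRAGLEN));
}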
763 static int init_rfdlist(struct net_device *dev)
765 struct ipg_nic_private *sp = netdev_priv(dev);
766 void __iomem *ioaddr = sp->ioaddr;
767 unsigned int i;
769 IPG_DEBUG_MSG("_init_rfdlist\n");
771 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
772 struct ipg_rx *rxfd = sp->rxd + i;
774 if (sp->rx_buff[i]) {
775 pci_unmap_single(sp->pdev,
776 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
777 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
778 dev_kfree_skb_irq(sp->rx_buff[i]);
779 sp->rx_buff[i] = NULL;
782 /* Clear out the RFS field. */
783 rxfd->rfs = 0x0000000000000000;
785 if (ipg_get_rxbuff(dev, i) < 0) {
787 * A receive buffer was not ready, break the
788 * RFD list here.
790 IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");
792 /* Just in case we cannot allocate a single RFD.
793 * Should not occur.
795 if (i == 0) {
796 printk(KERN_ERR "%s: No memory available"
797 " for RFD list.\n", dev->name);
798 return -ENOMEM;
802 rxfd->next_desc = cpu_to_le64(sp->rxd_map +
803 sizeof(struct ipg_rx)*(i + 1));
805 sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
807 sp->rx_current = 0;
808 sp->rx_dirty = 0;
810 /* Write the location of the RFDList to the IPG. */
811 ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
812 ipg_w32(0x00000000, RFD_LIST_PTR_1);
814 return 0;
817 static void init_tfdlist(struct net_device *dev)
819 struct ipg_nic_private *sp = netdev_priv(dev);
820 void __iomem *ioaddr = sp->ioaddr;
821 unsigned int i;
823 IPG_DEBUG_MSG("_init_tfdlist\n");
825 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
826 struct ipg_tx *txfd = sp->txd + i;
828 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
830 if (sp->tx_buff[i]) {
831 dev_kfree_skb_irq(sp->tx_buff[i]);
832 sp->tx_buff[i] = NULL;
835 txfd->next_desc = cpu_to_le64(sp->txd_map +
836 sizeof(struct ipg_tx)*(i + 1));
838 sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
840 sp->tx_current = 0;
841 sp->tx_dirty = 0;
843 /* Write the location of the TFDList to the IPG. */
844 IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
845 (u32) sp->txd_map);
846 ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
847 ipg_w32(0x00000000, TFD_LIST_PTR_1);
849 sp->reset_current_tfd = 1;
853 * Free all transmit buffers which have already been transferred
854 * via DMA to the IPG.
856 static void ipg_nic_txfree(struct net_device *dev)
858 struct ipg_nic_private *sp = netdev_priv(dev);
859 unsigned int released, pending, dirty;
861 IPG_DEBUG_MSG("_nic_txfree\n");
863 pending = sp->tx_current - sp->tx_dirty;
864 dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
866 for (released = 0; released < pending; released++) {
867 struct sk_buff *skb = sp->tx_buff[dirty];
868 struct ipg_tx *txfd = sp->txd + dirty;
870 IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);
872 /* Look at each TFD's TFC field beginning
873 * at the last freed TFD up to the current TFD.
874 * If the TFDDone bit is set, free the associated
875 * buffer.
877 if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
878 break;
880 /* Free the transmit buffer. */
881 if (skb) {
882 pci_unmap_single(sp->pdev,
883 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
884 skb->len, PCI_DMA_TODEVICE);
886 dev_kfree_skb_irq(skb);
888 sp->tx_buff[dirty] = NULL;
890 dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
893 sp->tx_dirty += released;
895 if (netif_queue_stopped(dev) &&
896 (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
897 netif_wake_queue(dev);
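/*
 * Illustrative helper (not part of the driver): tx_current and tx_dirty are
 * free-running counters, so the number of TFDs still owned by the hardware
 * is their difference and the ring index is taken modulo IPG_TFDLIST_LENGTH,
 * as ipg_nic_txfree() above and ipg_nic_hard_start_xmit() below assume.
 */
static inline int ipg_example_tx_ring_full(const struct ipg_nic_private *sp)
{
	return sp->tx_current == sp->tx_dirty + IPG_TFDLIST_LENGTH;
}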
901 static void ipg_tx_timeout(struct net_device *dev)
903 struct ipg_nic_private *sp = netdev_priv(dev);
904 void __iomem *ioaddr = sp->ioaddr;
906 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
907 IPG_AC_FIFO);
909 spin_lock_irq(&sp->lock);
911 /* Re-configure after DMA reset. */
912 if (ipg_io_config(dev) < 0) {
913 printk(KERN_INFO "%s: Error during re-configuration.\n",
914 dev->name);
917 init_tfdlist(dev);
919 spin_unlock_irq(&sp->lock);
921 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
922 MAC_CTRL);
926 * For TxComplete interrupts, free all transmit
927 * buffers which have already been transferred via DMA
928 * to the IPG.
930 static void ipg_nic_txcleanup(struct net_device *dev)
932 struct ipg_nic_private *sp = netdev_priv(dev);
933 void __iomem *ioaddr = sp->ioaddr;
934 unsigned int i;
936 IPG_DEBUG_MSG("_nic_txcleanup\n");
938 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
939 /* Reading the TXSTATUS register clears the
940 * TX_COMPLETE interrupt.
942 u32 txstatusdword = ipg_r32(TX_STATUS);
944 IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword);
946 /* Check for Transmit errors. Error bits only valid if
947 * TX_COMPLETE bit in the TXSTATUS register is a 1.
949 if (!(txstatusdword & IPG_TS_TX_COMPLETE))
950 break;
952 /* If in 10Mbps mode, indicate transmit is ready. */
953 if (sp->tenmbpsmode) {
954 netif_wake_queue(dev);
957 /* Transmit error, increment stat counters. */
958 if (txstatusdword & IPG_TS_TX_ERROR) {
959 IPG_DEBUG_MSG("Transmit error.\n");
960 sp->stats.tx_errors++;
963 /* Late collision, re-enable transmitter. */
964 if (txstatusdword & IPG_TS_LATE_COLLISION) {
965 IPG_DEBUG_MSG("Late collision on transmit.\n");
966 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
967 IPG_MC_RSVD_MASK, MAC_CTRL);
970 /* Maximum collisions, re-enable transmitter. */
971 if (txstatusdword & IPG_TS_TX_MAX_COLL) {
972 IPG_DEBUG_MSG("Maximum collisions on transmit.\n");
973 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
974 IPG_MC_RSVD_MASK, MAC_CTRL);
977 /* Transmit underrun, reset and re-enable
978 * transmitter.
980 if (txstatusdword & IPG_TS_TX_UNDERRUN) {
981 IPG_DEBUG_MSG("Transmitter underrun.\n");
982 sp->stats.tx_fifo_errors++;
983 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
984 IPG_AC_NETWORK | IPG_AC_FIFO);
986 /* Re-configure after DMA reset. */
987 if (ipg_io_config(dev) < 0) {
988 printk(KERN_INFO
989 "%s: Error during re-configuration.\n",
990 dev->name);
992 init_tfdlist(dev);
994 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
995 IPG_MC_RSVD_MASK, MAC_CTRL);
999 ipg_nic_txfree(dev);
1002 /* Provides statistical information about the IPG NIC. */
1003 static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
1005 struct ipg_nic_private *sp = netdev_priv(dev);
1006 void __iomem *ioaddr = sp->ioaddr;
1007 u16 temp1;
1008 u16 temp2;
1010 IPG_DEBUG_MSG("_nic_get_stats\n");
1012 /* Check to see if the NIC has been initialized via nic_open,
1013 * before trying to read statistic registers.
1015 if (!test_bit(__LINK_STATE_START, &dev->state))
1016 return &sp->stats;
1018 sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
1019 sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
1020 sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
1021 sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
1022 temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
1023 sp->stats.rx_errors += temp1;
1024 sp->stats.rx_missed_errors += temp1;
1025 temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
1026 ipg_r32(IPG_LATECOLLISIONS);
1027 temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
1028 sp->stats.collisions += temp1;
1029 sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
1030 sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
1031 ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
1032 sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
1034 /* detailed tx_errors */
1035 sp->stats.tx_carrier_errors += temp2;
1037 /* detailed rx_errors */
1038 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
1039 ipg_r16(IPG_FRAMETOOLONGERRRORS);
1040 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
1042 /* Unutilized IPG statistic registers. */
1043 ipg_r32(IPG_MCSTFRAMESRCVDOK);
1045 return &sp->stats;
1048 /* Restore used receive buffers. */
1049 static int ipg_nic_rxrestore(struct net_device *dev)
1051 struct ipg_nic_private *sp = netdev_priv(dev);
1052 const unsigned int curr = sp->rx_current;
1053 unsigned int dirty = sp->rx_dirty;
1055 IPG_DEBUG_MSG("_nic_rxrestore\n");
1057 for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
1058 unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
1060 /* rx_copybreak may poke holes here and there. */
1061 if (sp->rx_buff[entry])
1062 continue;
1064 /* Generate a new receive buffer to replace the
1065 * current buffer (which will be released by the
1066 * Linux system).
1068 if (ipg_get_rxbuff(dev, entry) < 0) {
1069 IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n");
1071 break;
1074 /* Reset the RFS field. */
1075 sp->rxd[entry].rfs = 0x0000000000000000;
1077 sp->rx_dirty = dirty;
1079 return 0;
1082 /* Use jumboindex and jumbosize to track jumbo frame status.
1083 * The initial status is jumboindex = -1 and jumbosize = 0.
1084 * 1. jumboindex = -1 and jumbosize = 0 : the previous jumbo frame has completed.
1085 * 2. jumboindex != -1 and jumbosize != 0 : a jumbo frame is being received and is not oversized.
1086 * 3. jumboindex = -1 and jumbosize != 0 : the jumbo frame is oversized; the data received so far
1087 * has already been dropped and the rest of the frame must still be consumed.
1089 enum {
1090 NORMAL_PACKET,
1091 ERROR_PACKET
1094 enum {
1095 FRAME_NO_START_NO_END = 0,
1096 FRAME_WITH_START = 1,
1097 FRAME_WITH_END = 10,
1098 FRAME_WITH_START_WITH_END = 11
1101 static void ipg_nic_rx_free_skb(struct net_device *dev)
1103 struct ipg_nic_private *sp = netdev_priv(dev);
1104 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1106 if (sp->rx_buff[entry]) {
1107 struct ipg_rx *rxfd = sp->rxd + entry;
1109 pci_unmap_single(sp->pdev,
1110 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1111 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1112 dev_kfree_skb_irq(sp->rx_buff[entry]);
1113 sp->rx_buff[entry] = NULL;
1117 static int ipg_nic_rx_check_frame_type(struct net_device *dev)
1119 struct ipg_nic_private *sp = netdev_priv(dev);
1120 struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
1121 int type = FRAME_NO_START_NO_END;
1123 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
1124 type += FRAME_WITH_START;
1125 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
1126 type += FRAME_WITH_END;
1127 return type;
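/*
 * Illustrative note on the encoding used above: FRAME_WITH_START is 1 and
 * FRAME_WITH_END is 10, so adding the two flags gives 11
 * (FRAME_WITH_START_WITH_END) and the sum can be switched on directly in
 * ipg_nic_rx_jumbo() below.
 */
static inline int ipg_example_frame_type(bool has_start, bool has_end)
{
	return (has_start ? FRAME_WITH_START : FRAME_NO_START_NO_END) +
	       (has_end ? FRAME_WITH_END : 0);
}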
1130 static int ipg_nic_rx_check_error(struct net_device *dev)
1132 struct ipg_nic_private *sp = netdev_priv(dev);
1133 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
1134 struct ipg_rx *rxfd = sp->rxd + entry;
1136 if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1137 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1138 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1139 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
1140 IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
1141 (unsigned long) rxfd->rfs);
1143 /* Increment general receive error statistic. */
1144 sp->stats.rx_errors++;
1146 /* Increment detailed receive error statistics. */
1147 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1148 IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
1150 sp->stats.rx_fifo_errors++;
1153 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1154 IPG_DEBUG_MSG("RX runt occurred.\n");
1155 sp->stats.rx_length_errors++;
1158 /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
1159 * error count handled by an IPG statistic register.
1162 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1163 IPG_DEBUG_MSG("RX alignment error occurred.\n");
1164 sp->stats.rx_frame_errors++;
1167 /* Do nothing for IPG_RFS_RXFCSERROR, error count
1168 * handled by an IPG statistic register.
1171 /* Free the memory associated with the RX
1172 * buffer since it is erroneous and we will
1173 * not pass it to higher layer processes.
1175 if (sp->rx_buff[entry]) {
1176 pci_unmap_single(sp->pdev,
1177 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1178 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1180 dev_kfree_skb_irq(sp->rx_buff[entry]);
1181 sp->rx_buff[entry] = NULL;
1183 return ERROR_PACKET;
1185 return NORMAL_PACKET;
1188 static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
1189 struct ipg_nic_private *sp,
1190 struct ipg_rx *rxfd, unsigned entry)
1192 struct ipg_jumbo *jumbo = &sp->jumbo;
1193 struct sk_buff *skb;
1194 int framelen;
1196 if (jumbo->found_start) {
1197 dev_kfree_skb_irq(jumbo->skb);
1198 jumbo->found_start = 0;
1199 jumbo->current_size = 0;
1200 jumbo->skb = NULL;
1203 /* 1: found error, 0 no error */
1204 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1205 return;
1207 skb = sp->rx_buff[entry];
1208 if (!skb)
1209 return;
1211 /* accept this frame and send to upper layer */
1212 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1213 if (framelen > sp->rxfrag_size)
1214 framelen = sp->rxfrag_size;
1216 skb_put(skb, framelen);
1217 skb->protocol = eth_type_trans(skb, dev);
1218 skb->ip_summed = CHECKSUM_NONE;
1219 netif_rx(skb);
1220 sp->rx_buff[entry] = NULL;
1223 static void ipg_nic_rx_with_start(struct net_device *dev,
1224 struct ipg_nic_private *sp,
1225 struct ipg_rx *rxfd, unsigned entry)
1227 struct ipg_jumbo *jumbo = &sp->jumbo;
1228 struct pci_dev *pdev = sp->pdev;
1229 struct sk_buff *skb;
1231 /* 1: found error, 0 no error */
1232 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
1233 return;
1235 /* accept this frame and send to upper layer */
1236 skb = sp->rx_buff[entry];
1237 if (!skb)
1238 return;
1240 if (jumbo->found_start)
1241 dev_kfree_skb_irq(jumbo->skb);
1243 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1244 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1246 skb_put(skb, sp->rxfrag_size);
1248 jumbo->found_start = 1;
1249 jumbo->current_size = sp->rxfrag_size;
1250 jumbo->skb = skb;
1252 sp->rx_buff[entry] = NULL;
1255 static void ipg_nic_rx_with_end(struct net_device *dev,
1256 struct ipg_nic_private *sp,
1257 struct ipg_rx *rxfd, unsigned entry)
1259 struct ipg_jumbo *jumbo = &sp->jumbo;
1261 /* 1: found error, 0 no error */
1262 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1263 struct sk_buff *skb = sp->rx_buff[entry];
1265 if (!skb)
1266 return;
1268 if (jumbo->found_start) {
1269 int framelen, endframelen;
1271 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1273 endframelen = framelen - jumbo->current_size;
1274 if (framelen > sp->rxsupport_size)
1275 dev_kfree_skb_irq(jumbo->skb);
1276 else {
1277 memcpy(skb_put(jumbo->skb, endframelen),
1278 skb->data, endframelen);
1280 jumbo->skb->protocol =
1281 eth_type_trans(jumbo->skb, dev);
1283 jumbo->skb->ip_summed = CHECKSUM_NONE;
1284 netif_rx(jumbo->skb);
1288 jumbo->found_start = 0;
1289 jumbo->current_size = 0;
1290 jumbo->skb = NULL;
1292 ipg_nic_rx_free_skb(dev);
1293 } else {
1294 dev_kfree_skb_irq(jumbo->skb);
1295 jumbo->found_start = 0;
1296 jumbo->current_size = 0;
1297 jumbo->skb = NULL;
1301 static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
1302 struct ipg_nic_private *sp,
1303 struct ipg_rx *rxfd, unsigned entry)
1305 struct ipg_jumbo *jumbo = &sp->jumbo;
1307 /* 1: found error, 0 no error */
1308 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
1309 struct sk_buff *skb = sp->rx_buff[entry];
1311 if (skb) {
1312 if (jumbo->found_start) {
1313 jumbo->current_size += sp->rxfrag_size;
1314 if (jumbo->current_size <= sp->rxsupport_size) {
1315 memcpy(skb_put(jumbo->skb,
1316 sp->rxfrag_size),
1317 skb->data, sp->rxfrag_size);
1320 ipg_nic_rx_free_skb(dev);
1322 } else {
1323 dev_kfree_skb_irq(jumbo->skb);
1324 jumbo->found_start = 0;
1325 jumbo->current_size = 0;
1326 jumbo->skb = NULL;
1330 static int ipg_nic_rx_jumbo(struct net_device *dev)
1332 struct ipg_nic_private *sp = netdev_priv(dev);
1333 unsigned int curr = sp->rx_current;
1334 void __iomem *ioaddr = sp->ioaddr;
1335 unsigned int i;
1337 IPG_DEBUG_MSG("_nic_rx\n");
1339 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1340 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1341 struct ipg_rx *rxfd = sp->rxd + entry;
1343 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
1344 break;
1346 switch (ipg_nic_rx_check_frame_type(dev)) {
1347 case FRAME_WITH_START_WITH_END:
1348 ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
1349 break;
1350 case FRAME_WITH_START:
1351 ipg_nic_rx_with_start(dev, sp, rxfd, entry);
1352 break;
1353 case FRAME_WITH_END:
1354 ipg_nic_rx_with_end(dev, sp, rxfd, entry);
1355 break;
1356 case FRAME_NO_START_NO_END:
1357 ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
1358 break;
1362 sp->rx_current = curr;
1364 if (i == IPG_MAXRFDPROCESS_COUNT) {
1365 /* There are more RFDs to process, however the
1366 * allocated amount of RFD processing time has
1367 * expired. Assert Interrupt Requested to make
1368 * sure we come back to process the remaining RFDs.
1370 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1373 ipg_nic_rxrestore(dev);
1375 return 0;
1378 static int ipg_nic_rx(struct net_device *dev)
1380 /* Transfer received Ethernet frames to higher network layers. */
1381 struct ipg_nic_private *sp = netdev_priv(dev);
1382 unsigned int curr = sp->rx_current;
1383 void __iomem *ioaddr = sp->ioaddr;
1384 struct ipg_rx *rxfd;
1385 unsigned int i;
1387 IPG_DEBUG_MSG("_nic_rx\n");
1389 #define __RFS_MASK \
1390 cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
1392 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
1393 unsigned int entry = curr % IPG_RFDLIST_LENGTH;
1394 struct sk_buff *skb = sp->rx_buff[entry];
1395 unsigned int framelen;
1397 rxfd = sp->rxd + entry;
1399 if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
1400 break;
1402 /* Get received frame length. */
1403 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
1405 /* Check for jumbo frame arrival with too small
1406 * RXFRAG_SIZE.
1408 if (framelen > sp->rxfrag_size) {
1409 IPG_DEBUG_MSG
1410 ("RFS FrameLen > allocated fragment size.\n");
1412 framelen = sp->rxfrag_size;
1415 if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
1416 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
1417 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
1418 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
1420 IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
1421 (unsigned long int) rxfd->rfs);
1423 /* Increment general receive error statistic. */
1424 sp->stats.rx_errors++;
1426 /* Increment detailed receive error statistics. */
1427 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
1428 IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
1429 sp->stats.rx_fifo_errors++;
1432 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
1433 IPG_DEBUG_MSG("RX runt occurred.\n");
1434 sp->stats.rx_length_errors++;
1437 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ;
1438 /* Do nothing, error count handled by a IPG
1439 * statistic register.
1442 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
1443 IPG_DEBUG_MSG("RX alignment error occurred.\n");
1444 sp->stats.rx_frame_errors++;
1447 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ;
1448 /* Do nothing, error count handled by a IPG
1449 * statistic register.
1452 /* Free the memory associated with the RX
1453 * buffer since it is erroneous and we will
1454 * not pass it to higher layer processes.
1456 if (skb) {
1457 __le64 info = rxfd->frag_info;
1459 pci_unmap_single(sp->pdev,
1460 le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
1461 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1463 dev_kfree_skb_irq(skb);
1465 } else {
1467 /* Adjust the new buffer length to accommodate the size
1468 * of the received frame.
1470 skb_put(skb, framelen);
1472 /* Set the buffer's protocol field to Ethernet. */
1473 skb->protocol = eth_type_trans(skb, dev);
1475 /* The IPG encountered an error with (or
1476 * there were no) IP/TCP/UDP checksums.
1477 * This may or may not indicate an invalid
1478 * IP/TCP/UDP frame was received. Let the
1479 * upper layer decide.
1481 skb->ip_summed = CHECKSUM_NONE;
1483 /* Hand off frame for higher layer processing.
1484 * The function netif_rx() releases the sk_buff
1485 * when processing completes.
1487 netif_rx(skb);
1490 /* Assure RX buffer is not reused by IPG. */
1491 sp->rx_buff[entry] = NULL;
1495 * If there are more RFDs to process and the allocated amount of RFD
1496 * processing time has expired, assert Interrupt Requested to make
1497 * sure we come back to process the remaining RFDs.
1499 if (i == IPG_MAXRFDPROCESS_COUNT)
1500 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
1502 #ifdef IPG_DEBUG
1503 /* Check if the RFD list contained no receive frame data. */
1504 if (!i)
1505 sp->EmptyRFDListCount++;
1506 #endif
1507 while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
1508 !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
1509 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
1510 unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
1512 rxfd = sp->rxd + entry;
1514 IPG_DEBUG_MSG("Frame requires multiple RFDs.\n");
1516 /* An unexpected event, additional code needed to handle
1517 * properly. So for the time being, just disregard the
1518 * frame.
1521 /* Free the memory associated with the RX
1522 * buffer since it is erroneous and we will
1523 * not pass it to higher layer processes.
1525 if (sp->rx_buff[entry]) {
1526 pci_unmap_single(sp->pdev,
1527 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1528 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1529 dev_kfree_skb_irq(sp->rx_buff[entry]);
1532 /* Assure RX buffer is not reused by IPG. */
1533 sp->rx_buff[entry] = NULL;
1536 sp->rx_current = curr;
1538 /* Check to see if there are a minimum number of used
1539 * RFDs before restoring any (should improve performance.)
1541 if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
1542 ipg_nic_rxrestore(dev);
1544 return 0;
1547 static void ipg_reset_after_host_error(struct work_struct *work)
1549 struct ipg_nic_private *sp =
1550 container_of(work, struct ipg_nic_private, task.work);
1551 struct net_device *dev = sp->dev;
1553 IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));
1556 * Acknowledge HostError interrupt by resetting
1557 * IPG DMA and HOST.
1559 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1561 init_rfdlist(dev);
1562 init_tfdlist(dev);
1564 if (ipg_io_config(dev) < 0) {
1565 printk(KERN_INFO "%s: Cannot recover from PCI error.\n",
1566 dev->name);
1567 schedule_delayed_work(&sp->task, HZ);
1571 static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
1573 struct net_device *dev = dev_inst;
1574 struct ipg_nic_private *sp = netdev_priv(dev);
1575 void __iomem *ioaddr = sp->ioaddr;
1576 unsigned int handled = 0;
1577 u16 status;
1579 IPG_DEBUG_MSG("_interrupt_handler\n");
1581 if (sp->is_jumbo)
1582 ipg_nic_rxrestore(dev);
1584 spin_lock(&sp->lock);
1586 /* Get interrupt source information, and acknowledge
1587 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
1588 * IntRequested, MacControlFrame, LinkEvent) interrupts
1589 * if issued. Also, all IPG interrupts are disabled by
1590 * reading IntStatusAck.
1592 status = ipg_r16(INT_STATUS_ACK);
1594 IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status);
1596 /* Shared IRQ of remove event. */
1597 if (!(status & IPG_IS_RSVD_MASK))
1598 goto out_enable;
1600 handled = 1;
1602 if (unlikely(!netif_running(dev)))
1603 goto out_unlock;
1605 /* If RFDListEnd interrupt, restore all used RFDs. */
1606 if (status & IPG_IS_RFD_LIST_END) {
1607 IPG_DEBUG_MSG("RFDListEnd Interrupt.\n");
1609 /* The RFD list end indicates an RFD was encountered
1610 * with a 0 NextPtr, or with an RFDDone bit set to 1
1611 * (indicating the RFD is not ready for use by the
1612 * IPG.) Try to restore all RFDs.
1614 ipg_nic_rxrestore(dev);
1616 #ifdef IPG_DEBUG
1617 /* Increment the RFDlistendCount counter. */
1618 sp->RFDlistendCount++;
1619 #endif
1622 /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
1623 * IntRequested interrupt, process received frames. */
1624 if ((status & IPG_IS_RX_DMA_PRIORITY) ||
1625 (status & IPG_IS_RFD_LIST_END) ||
1626 (status & IPG_IS_RX_DMA_COMPLETE) ||
1627 (status & IPG_IS_INT_REQUESTED)) {
1628 #ifdef IPG_DEBUG
1629 /* Increment the RFD list checked counter if interrupted
1630 * only to check the RFD list. */
1631 if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
1632 IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
1633 (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
1634 IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
1635 IPG_IS_UPDATE_STATS)))
1636 sp->RFDListCheckedCount++;
1637 #endif
1639 if (sp->is_jumbo)
1640 ipg_nic_rx_jumbo(dev);
1641 else
1642 ipg_nic_rx(dev);
1645 /* If TxDMAComplete interrupt, free used TFDs. */
1646 if (status & IPG_IS_TX_DMA_COMPLETE)
1647 ipg_nic_txfree(dev);
1649 /* TxComplete interrupts indicate one of numerous actions.
1650 * Determine what action to take based on TXSTATUS register.
1652 if (status & IPG_IS_TX_COMPLETE)
1653 ipg_nic_txcleanup(dev);
1655 /* If UpdateStats interrupt, update Linux Ethernet statistics */
1656 if (status & IPG_IS_UPDATE_STATS)
1657 ipg_nic_get_stats(dev);
1659 /* If HostError interrupt, reset IPG. */
1660 if (status & IPG_IS_HOST_ERROR) {
1661 IPG_DDEBUG_MSG("HostError Interrupt\n");
1663 schedule_delayed_work(&sp->task, 0);
1666 /* If LinkEvent interrupt, resolve autonegotiation. */
1667 if (status & IPG_IS_LINK_EVENT) {
1668 if (ipg_config_autoneg(dev) < 0)
1669 printk(KERN_INFO "%s: Auto-negotiation error.\n",
1670 dev->name);
1673 /* If MACCtrlFrame interrupt, do nothing. */
1674 if (status & IPG_IS_MAC_CTRL_FRAME)
1675 IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n");
1677 /* If RxComplete interrupt, do nothing. */
1678 if (status & IPG_IS_RX_COMPLETE)
1679 IPG_DEBUG_MSG("RxComplete interrupt.\n");
1681 /* If RxEarly interrupt, do nothing. */
1682 if (status & IPG_IS_RX_EARLY)
1683 IPG_DEBUG_MSG("RxEarly interrupt.\n");
1685 out_enable:
1686 /* Re-enable IPG interrupts. */
1687 ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
1688 IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
1689 IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
1690 out_unlock:
1691 spin_unlock(&sp->lock);
1693 return IRQ_RETVAL(handled);
1696 static void ipg_rx_clear(struct ipg_nic_private *sp)
1698 unsigned int i;
1700 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
1701 if (sp->rx_buff[i]) {
1702 struct ipg_rx *rxfd = sp->rxd + i;
1704 dev_kfree_skb_irq(sp->rx_buff[i]);
1705 sp->rx_buff[i] = NULL;
1706 pci_unmap_single(sp->pdev,
1707 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
1708 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1713 static void ipg_tx_clear(struct ipg_nic_private *sp)
1715 unsigned int i;
1717 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
1718 if (sp->tx_buff[i]) {
1719 struct ipg_tx *txfd = sp->txd + i;
1721 pci_unmap_single(sp->pdev,
1722 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
1723 sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
1725 dev_kfree_skb_irq(sp->tx_buff[i]);
1727 sp->tx_buff[i] = NULL;
1732 static int ipg_nic_open(struct net_device *dev)
1734 struct ipg_nic_private *sp = netdev_priv(dev);
1735 void __iomem *ioaddr = sp->ioaddr;
1736 struct pci_dev *pdev = sp->pdev;
1737 int rc;
1739 IPG_DEBUG_MSG("_nic_open\n");
1741 sp->rx_buf_sz = sp->rxsupport_size;
1743 /* Check for interrupt line conflicts, and request interrupt
1744 * line for IPG.
1746 * IMPORTANT: Disable IPG interrupts prior to registering
1747 * IRQ.
1749 ipg_w16(0x0000, INT_ENABLE);
1751 /* Register the interrupt line to be used by the IPG within
1752 * the Linux system.
1754 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
1755 dev->name, dev);
1756 if (rc < 0) {
1757 printk(KERN_INFO "%s: Error when requesting interrupt.\n",
1758 dev->name);
1759 goto out;
1762 dev->irq = pdev->irq;
1764 rc = -ENOMEM;
1766 sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
1767 &sp->rxd_map, GFP_KERNEL);
1768 if (!sp->rxd)
1769 goto err_free_irq_0;
1771 sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
1772 &sp->txd_map, GFP_KERNEL);
1773 if (!sp->txd)
1774 goto err_free_rx_1;
1776 rc = init_rfdlist(dev);
1777 if (rc < 0) {
1778 printk(KERN_INFO "%s: Error during configuration.\n",
1779 dev->name);
1780 goto err_free_tx_2;
1783 init_tfdlist(dev);
1785 rc = ipg_io_config(dev);
1786 if (rc < 0) {
1787 printk(KERN_INFO "%s: Error during configuration.\n",
1788 dev->name);
1789 goto err_release_tfdlist_3;
1792 /* Resolve autonegotiation. */
1793 if (ipg_config_autoneg(dev) < 0)
1794 printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name);
1796 /* initialize JUMBO Frame control variable */
1797 sp->jumbo.found_start = 0;
1798 sp->jumbo.current_size = 0;
1799 sp->jumbo.skb = NULL;
1801 /* Enable transmit and receive operation of the IPG. */
1802 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
1803 IPG_MC_RSVD_MASK, MAC_CTRL);
1805 netif_start_queue(dev);
1806 out:
1807 return rc;
1809 err_release_tfdlist_3:
1810 ipg_tx_clear(sp);
1811 ipg_rx_clear(sp);
1812 err_free_tx_2:
1813 dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1814 err_free_rx_1:
1815 dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1816 err_free_irq_0:
1817 free_irq(pdev->irq, dev);
1818 goto out;
1821 static int ipg_nic_stop(struct net_device *dev)
1823 struct ipg_nic_private *sp = netdev_priv(dev);
1824 void __iomem *ioaddr = sp->ioaddr;
1825 struct pci_dev *pdev = sp->pdev;
1827 IPG_DEBUG_MSG("_nic_stop\n");
1829 netif_stop_queue(dev);
1831 IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
1832 IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
1833 IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
1834 IPG_DUMPTFDLIST(dev);
1836 do {
1837 (void) ipg_r16(INT_STATUS_ACK);
1839 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
1841 synchronize_irq(pdev->irq);
1842 } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
1844 ipg_rx_clear(sp);
1846 ipg_tx_clear(sp);
1848 pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
1849 pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
1851 free_irq(pdev->irq, dev);
1853 return 0;
1856 static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
1857 struct net_device *dev)
1859 struct ipg_nic_private *sp = netdev_priv(dev);
1860 void __iomem *ioaddr = sp->ioaddr;
1861 unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
1862 unsigned long flags;
1863 struct ipg_tx *txfd;
1865 IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
1867 /* If in 10Mbps mode, stop the transmit queue so
1868 * no more transmit frames are accepted.
1870 if (sp->tenmbpsmode)
1871 netif_stop_queue(dev);
1873 if (sp->reset_current_tfd) {
1874 sp->reset_current_tfd = 0;
1875 entry = 0;
1878 txfd = sp->txd + entry;
1880 sp->tx_buff[entry] = skb;
1882 /* Clear all TFC fields, except TFDDONE. */
1883 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
1885 /* Specify the TFC field within the TFD. */
1886 txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
1887 (IPG_TFC_FRAMEID & sp->tx_current) |
1888 (IPG_TFC_FRAGCOUNT & (1 << 24)));
1890 * 16--17 (WordAlign) <- 3 (disable),
1891 * 0--15 (FrameId) <- sp->tx_current,
1892 * 24--27 (FragCount) <- 1
1895 /* Request TxComplete interrupts at an interval defined
1896 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
1897 * Request TxComplete interrupt for every frame
1898 * if in 10Mbps mode to accommodate a problem with 10Mbps
1899 * processing.
1901 if (sp->tenmbpsmode)
1902 txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
1903 txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
1904 /* Based on compilation option, determine if FCS is to be
1905 * appended to transmit frame by IPG.
1907 if (!(IPG_APPEND_FCS_ON_TX))
1908 txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
1910 /* Based on compilation option, determine if IP, TCP and/or
1911 * UDP checksums are to be added to transmit frame by IPG.
1913 if (IPG_ADD_IPCHECKSUM_ON_TX)
1914 txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
1916 if (IPG_ADD_TCPCHECKSUM_ON_TX)
1917 txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
1919 if (IPG_ADD_UDPCHECKSUM_ON_TX)
1920 txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
1922 /* Based on compilation option, determine if VLAN tag info is to be
1923 * inserted into transmit frame by IPG.
1925 if (IPG_INSERT_MANUAL_VLAN_TAG) {
1926 txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
1927 ((u64) IPG_MANUAL_VLAN_VID << 32) |
1928 ((u64) IPG_MANUAL_VLAN_CFI << 44) |
1929 ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
1932 /* The fragment start location within system memory is defined
1933 * by the sk_buff structure's data field. The physical address
1934 * of this location within the system's virtual memory space
1935 * is determined using the IPG_HOST2BUS_MAP function.
1937 txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
1938 skb->len, PCI_DMA_TODEVICE));
1940 /* The length of the fragment within system memory is defined by
1941 * the sk_buff structure's len field.
1943 txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
1944 ((u64) (skb->len & 0xffff) << 48));
1946 /* Clear the TFDDone bit last to indicate the TFD is ready
1947 * for transfer to the IPG.
1949 txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
1951 spin_lock_irqsave(&sp->lock, flags);
1953 sp->tx_current++;
1955 mmiowb();
1957 ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
1959 if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
1960 netif_stop_queue(dev);
1962 spin_unlock_irqrestore(&sp->lock, flags);
1964 return NETDEV_TX_OK;
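/*
 * Illustrative sketch of the TFC word assembled above for a single-fragment
 * frame with word alignment disabled:
 *   bits  0-15  FrameId    <- sp->tx_current
 *   bits 16-17  WordAlign  <- 3 (disabled)
 *   bits 24-27  FragCount  <- 1
 */
static inline __le64 ipg_example_build_tfc(u16 frame_id)
{
	return cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
			   (IPG_TFC_FRAMEID & frame_id) |
			   (IPG_TFC_FRAGCOUNT & (1 << 24)));
}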
1967 static void ipg_set_phy_default_param(unsigned char rev,
1968 struct net_device *dev, int phy_address)
1970 unsigned short length;
1971 unsigned char revision;
1972 unsigned short *phy_param;
1973 unsigned short address, value;
1975 phy_param = &DefaultPhyParam[0];
1976 length = *phy_param & 0x00FF;
1977 revision = (unsigned char)((*phy_param) >> 8);
1978 phy_param++;
1979 while (length != 0) {
1980 if (rev == revision) {
1981 while (length > 1) {
1982 address = *phy_param;
1983 value = *(phy_param + 1);
1984 phy_param += 2;
1985 mdio_write(dev, phy_address, address, value);
1986 length -= 4;
1988 break;
1989 } else {
1990 phy_param += length / 2;
1991 length = *phy_param & 0x00FF;
1992 revision = (unsigned char)((*phy_param) >> 8);
1993 phy_param++;
1998 static int read_eeprom(struct net_device *dev, int eep_addr)
2000 void __iomem *ioaddr = ipg_ioaddr(dev);
2001 unsigned int i;
2002 int ret = 0;
2003 u16 value;
2005 value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
2006 ipg_w16(value, EEPROM_CTRL);
2008 for (i = 0; i < 1000; i++) {
2009 u16 data;
2011 mdelay(10);
2012 data = ipg_r16(EEPROM_CTRL);
2013 if (!(data & IPG_EC_EEPROM_BUSY)) {
2014 ret = ipg_r16(EEPROM_DATA);
2015 break;
2018 return ret;
2021 static void ipg_init_mii(struct net_device *dev)
2023 struct ipg_nic_private *sp = netdev_priv(dev);
2024 struct mii_if_info *mii_if = &sp->mii_if;
2025 int phyaddr;
2027 mii_if->dev = dev;
2028 mii_if->mdio_read = mdio_read;
2029 mii_if->mdio_write = mdio_write;
2030 mii_if->phy_id_mask = 0x1f;
2031 mii_if->reg_num_mask = 0x1f;
2033 mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
2035 if (phyaddr != 0x1f) {
2036 u16 mii_phyctrl, mii_1000cr;
2037 u8 revisionid = 0;
2039 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
2040 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
2041 GMII_PHY_1000BASETCONTROL_PreferMaster;
2042 mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
2044 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
2046 /* Set default phyparam */
2047 pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid);
2048 ipg_set_phy_default_param(revisionid, dev, phyaddr);
2050 /* Reset PHY */
2051 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
2052 mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
2053 }
2054 }
2057 static int ipg_hw_init(struct net_device *dev)
2058 {
2059 struct ipg_nic_private *sp = netdev_priv(dev);
2060 void __iomem *ioaddr = sp->ioaddr;
2061 unsigned int i;
2062 int rc;
2064 /* Read/Write and Reset EEPROM Value */
2065 /* Read LED Mode Configuration from EEPROM */
2066 sp->led_mode = read_eeprom(dev, 6);
2068 /* Reset all functions within the IPG. Do not assert
2069 * RST_OUT, as it is not compatible with some PHYs.
2070 */
2071 rc = ipg_reset(dev, IPG_RESET_MASK);
2072 if (rc < 0)
2073 goto out;
2075 ipg_init_mii(dev);
2077 /* Read MAC Address from EEPROM */
2078 for (i = 0; i < 3; i++)
2079 sp->station_addr[i] = read_eeprom(dev, 16 + i);
2081 for (i = 0; i < 3; i++)
2082 ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
2084 /* Set station address in ethernet_device structure. */
2085 dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff;
2086 dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
2087 dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff;
2088 dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
2089 dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff;
2090 dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
2091 out:
2092 return rc;
2093 }
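/*
 * Illustrative sketch (not part of the original driver): each 16-bit
 * STATION_ADDRESS_x register holds two MAC address octets, low octet
 * first, which is exactly how the bytes are unpacked above. A hypothetical
 * helper for a single register (example_unpack_station_addr is not a real
 * driver function):
 */
static void example_unpack_station_addr(u16 reg_val, u8 *two_octets)
{
	two_octets[0] = reg_val & 0x00ff;		/* low octet */
	two_octets[1] = (reg_val & 0xff00) >> 8;	/* high octet */
}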
2095 static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2096 {
2097 struct ipg_nic_private *sp = netdev_priv(dev);
2098 int rc;
2100 mutex_lock(&sp->mii_mutex);
2101 rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
2102 mutex_unlock(&sp->mii_mutex);
2104 return rc;
2105 }
2107 static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
2108 {
2109 struct ipg_nic_private *sp = netdev_priv(dev);
2110 int err;
2112 /* Function to accommodate changes to the Maximum Transmission Unit
2113 * (or MTU) of the IPG NIC. Cannot use the default function since
2114 * it will not allow for MTU > 1500 bytes.
2115 */
2117 IPG_DEBUG_MSG("_nic_change_mtu\n");
2119 /*
2120 * Check that the new MTU value is between 68 (the minimum IPv4 MTU)
2121 * and 10240 bytes, which is the largest frame size this hardware supports.
2122 */
2123 if (new_mtu < 68 || new_mtu > 10240)
2124 return -EINVAL;
2126 err = ipg_nic_stop(dev);
2127 if (err)
2128 return err;
2130 dev->mtu = new_mtu;
2132 sp->max_rxframe_size = new_mtu;
2134 sp->rxfrag_size = new_mtu;
2135 if (sp->rxfrag_size > 4088)
2136 sp->rxfrag_size = 4088;
2138 sp->rxsupport_size = sp->max_rxframe_size;
2140 if (new_mtu > 0x0600)
2141 sp->is_jumbo = true;
2142 else
2143 sp->is_jumbo = false;
2145 return ipg_nic_open(dev);
2146 }
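/*
 * Illustrative example (not part of the original source): with new_mtu =
 * 9000 the assignments above yield max_rxframe_size = rxsupport_size =
 * 9000, rxfrag_size = 4088 (clamped) and is_jumbo = true; with new_mtu =
 * 1500 they yield 1500 for all three sizes and is_jumbo = false.
 */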
2148 static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2149 {
2150 struct ipg_nic_private *sp = netdev_priv(dev);
2151 int rc;
2153 mutex_lock(&sp->mii_mutex);
2154 rc = mii_ethtool_gset(&sp->mii_if, cmd);
2155 mutex_unlock(&sp->mii_mutex);
2157 return rc;
2158 }
2160 static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2161 {
2162 struct ipg_nic_private *sp = netdev_priv(dev);
2163 int rc;
2165 mutex_lock(&sp->mii_mutex);
2166 rc = mii_ethtool_sset(&sp->mii_if, cmd);
2167 mutex_unlock(&sp->mii_mutex);
2169 return rc;
2170 }
2172 static int ipg_nway_reset(struct net_device *dev)
2173 {
2174 struct ipg_nic_private *sp = netdev_priv(dev);
2175 int rc;
2177 mutex_lock(&sp->mii_mutex);
2178 rc = mii_nway_restart(&sp->mii_if);
2179 mutex_unlock(&sp->mii_mutex);
2181 return rc;
2182 }
2184 static const struct ethtool_ops ipg_ethtool_ops = {
2185 .get_settings = ipg_get_settings,
2186 .set_settings = ipg_set_settings,
2187 .nway_reset = ipg_nway_reset,
2188 };
2190 static void __devexit ipg_remove(struct pci_dev *pdev)
2191 {
2192 struct net_device *dev = pci_get_drvdata(pdev);
2193 struct ipg_nic_private *sp = netdev_priv(dev);
2195 IPG_DEBUG_MSG("_remove\n");
2197 /* Un-register Ethernet device. */
2198 unregister_netdev(dev);
2200 pci_iounmap(pdev, sp->ioaddr);
2202 pci_release_regions(pdev);
2204 free_netdev(dev);
2205 pci_disable_device(pdev);
2206 pci_set_drvdata(pdev, NULL);
2207 }
2209 static const struct net_device_ops ipg_netdev_ops = {
2210 .ndo_open = ipg_nic_open,
2211 .ndo_stop = ipg_nic_stop,
2212 .ndo_start_xmit = ipg_nic_hard_start_xmit,
2213 .ndo_get_stats = ipg_nic_get_stats,
2214 .ndo_set_multicast_list = ipg_nic_set_multicast_list,
2215 .ndo_do_ioctl = ipg_ioctl,
2216 .ndo_tx_timeout = ipg_tx_timeout,
2217 .ndo_change_mtu = ipg_nic_change_mtu,
2218 .ndo_set_mac_address = eth_mac_addr,
2219 .ndo_validate_addr = eth_validate_addr,
2220 };
2222 static int __devinit ipg_probe(struct pci_dev *pdev,
2223 const struct pci_device_id *id)
2224 {
2225 unsigned int i = id->driver_data;
2226 struct ipg_nic_private *sp;
2227 struct net_device *dev;
2228 void __iomem *ioaddr;
2229 int rc;
2231 rc = pci_enable_device(pdev);
2232 if (rc < 0)
2233 goto out;
2235 printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
2237 pci_set_master(pdev);
2239 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2240 if (rc < 0) {
2241 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2242 if (rc < 0) {
2243 printk(KERN_ERR "%s: DMA config failed.\n",
2244 pci_name(pdev));
2245 goto err_disable_0;
2246 }
2247 }
2249 /*
2250 * Initialize net device.
2251 */
2252 dev = alloc_etherdev(sizeof(struct ipg_nic_private));
2253 if (!dev) {
2254 printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev));
2255 rc = -ENOMEM;
2256 goto err_disable_0;
2257 }
2259 sp = netdev_priv(dev);
2260 spin_lock_init(&sp->lock);
2261 mutex_init(&sp->mii_mutex);
2263 sp->is_jumbo = IPG_IS_JUMBO;
2264 sp->rxfrag_size = IPG_RXFRAG_SIZE;
2265 sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
2266 sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
2268 /* Declare IPG NIC functions for Ethernet device methods.
2269 */
2270 dev->netdev_ops = &ipg_netdev_ops;
2271 SET_NETDEV_DEV(dev, &pdev->dev);
2272 SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
2274 rc = pci_request_regions(pdev, DRV_NAME);
2275 if (rc)
2276 goto err_free_dev_1;
2278 ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
2279 if (!ioaddr) {
2280 printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev));
2281 rc = -EIO;
2282 goto err_release_regions_2;
2283 }
2285 /* Save the pointer to the PCI device information. */
2286 sp->ioaddr = ioaddr;
2287 sp->pdev = pdev;
2288 sp->dev = dev;
2290 INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
2292 pci_set_drvdata(pdev, dev);
2294 rc = ipg_hw_init(dev);
2295 if (rc < 0)
2296 goto err_unmap_3;
2298 rc = register_netdev(dev);
2299 if (rc < 0)
2300 goto err_unmap_3;
2302 printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name);
2303 out:
2304 return rc;
2306 err_unmap_3:
2307 pci_iounmap(pdev, ioaddr);
2308 err_release_regions_2:
2309 pci_release_regions(pdev);
2310 err_free_dev_1:
2311 free_netdev(dev);
2312 err_disable_0:
2313 pci_disable_device(pdev);
2314 goto out;
2315 }
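/*
 * Illustrative sketch (not part of the original driver): the probe path
 * above prefers a 40-bit DMA mask and falls back to 32-bit. The same
 * pattern as a stand-alone helper (example_set_dma_mask is not a real
 * driver function):
 */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(40)) == 0)
		return 0;
	/* Fall back to a 32-bit mask if 40-bit addressing is unavailable. */
	return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}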
2317 static struct pci_driver ipg_pci_driver = {
2318 .name = IPG_DRIVER_NAME,
2319 .id_table = ipg_pci_tbl,
2320 .probe = ipg_probe,
2321 .remove = __devexit_p(ipg_remove),
2322 };
2324 static int __init ipg_init_module(void)
2325 {
2326 return pci_register_driver(&ipg_pci_driver);
2327 }
2329 static void __exit ipg_exit_module(void)
2330 {
2331 pci_unregister_driver(&ipg_pci_driver);
2332 }
2334 module_init(ipg_init_module);
2335 module_exit(ipg_exit_module);