/*
 * drivers/net/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 *
 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc.
 * Copyright (c) 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar:  AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through platform_device.  Structures which
 * define the configuration needed by the board are defined in a
 * board structure in arch/ppc/platforms (though I do not
 * discount the possibility that other architectures could one
 * day be supported).
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors.  The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed).  In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit.  Without NAPI, the packet(s) will be handled
 * immediately.  Both methods will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets).  The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in.  The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted.  Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
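/*
 * Illustrative sketch (editorial, not part of the driver): both the rx
 * and tx paths walk the descriptor ring the same way, advancing one
 * descriptor at a time and jumping back to the ring base when the WRAP
 * bit is seen.  The rx cleanup loop in gfar_clean_rx_ring() below is
 * the canonical form:
 *
 *      bdp = priv->cur_rx;
 *      while (!(bdp->status & RXBD_EMPTY)) {
 *              // pass the skb up, hang a fresh skb on the descriptor
 *              bdp = (bdp->status & RXBD_WRAP) ? priv->rx_bd_base
 *                                              : bdp + 1;
 *      }
 *      priv->cur_rx = bdp;
 */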
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "gianfar.h"
#include "gianfar_mii.h"

#define TX_TIMEOUT      (1*HZ)
#define SKB_ALLOC_TIMEOUT 1000000
#undef BRIEF_GFAR_ERRORS
#undef VERBOSE_GFAR_ERRORS

#ifdef CONFIG_GFAR_NAPI
#define RECEIVE(x) netif_receive_skb(x)
#else
#define RECEIVE(x) netif_rx(x)
#endif

const char gfar_driver_name[] = "Gianfar Ethernet";
const char gfar_driver_version[] = "1.3";
static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *pdev);
static int gfar_remove(struct platform_device *pdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value);
extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum);
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct napi_struct *napi, int budget);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit);
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, int length);
static void gfar_vlan_rx_register(struct net_device *netdev,
                                  struct vlan_group *grp);
void gfar_halt(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);

extern const struct ethtool_ops gfar_ethtool_ops;

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

/* Returns 1 if incoming frames use an FCB */
static inline int gfar_uses_fcb(struct gfar_private *priv)
{
        return (priv->vlan_enable || priv->rx_csum_enable);
}
/* Set up the ethernet device structure, private data,
 * and anything else we need before we start */
static int gfar_probe(struct platform_device *pdev)
{
        u32 tempval;
        struct net_device *dev = NULL;
        struct gfar_private *priv = NULL;
        struct gianfar_platform_data *einfo;
        struct resource *r;
        int err = 0;
        DECLARE_MAC_BUF(mac);

        einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;

        if (NULL == einfo) {
                printk(KERN_ERR "gfar %d: Missing additional data!\n",
                       pdev->id);

                return -ENODEV;
        }

        /* Create an ethernet device instance */
        dev = alloc_etherdev(sizeof (*priv));

        if (NULL == dev)
                return -ENOMEM;

        priv = netdev_priv(dev);
        priv->dev = dev;

        /* Set the info in the priv to the current info */
        priv->einfo = einfo;

        /* fill out IRQ fields */
        if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
                priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
                priv->interruptError = platform_get_irq_byname(pdev, "error");
                if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
                        goto regs_fail;
        } else {
                priv->interruptTransmit = platform_get_irq(pdev, 0);
                if (priv->interruptTransmit < 0)
                        goto regs_fail;
        }

        /* get a pointer to the register memory */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->regs = ioremap(r->start, sizeof (struct gfar));

        if (NULL == priv->regs) {
                err = -ENOMEM;
                goto regs_fail;
        }

        spin_lock_init(&priv->txlock);
        spin_lock_init(&priv->rxlock);

        platform_set_drvdata(pdev, dev);

        /* Stop the DMA engine now, in case it was running before */
        /* (The firmware could have used it, and left it running). */
        /* To do this, we write Graceful Receive Stop and Graceful */
        /* Transmit Stop, and then wait until the corresponding bits */
        /* in IEVENT indicate the stops have completed. */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= (DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        while (!(gfar_read(&priv->regs->ievent) & (IEVENT_GRSC | IEVENT_GTSC)))
                cpu_relax();

        /* Reset MAC layer */
        gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET);

        tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
        gfar_write(&priv->regs->maccfg1, tempval);

        /* Initialize MACCFG2. */
        gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS);

        /* Initialize ECNTRL */
        gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS);

        /* Copy the station address into the dev structure, */
        memcpy(dev->dev_addr, einfo->mac_addr, MAC_ADDR_LEN);

        /* Set the dev->base_addr to the gfar reg region */
        dev->base_addr = (unsigned long) (priv->regs);

        SET_NETDEV_DEV(dev, &pdev->dev);

        /* Fill in the dev structure */
        dev->open = gfar_enet_open;
        dev->hard_start_xmit = gfar_start_xmit;
        dev->tx_timeout = gfar_timeout;
        dev->watchdog_timeo = TX_TIMEOUT;
#ifdef CONFIG_GFAR_NAPI
        netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = gfar_netpoll;
#endif
        dev->stop = gfar_close;
        dev->change_mtu = gfar_change_mtu;
        dev->mtu = 1500;
        dev->set_multicast_list = gfar_set_multi;

        dev->ethtool_ops = &gfar_ethtool_ops;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
                priv->rx_csum_enable = 1;
                dev->features |= NETIF_F_IP_CSUM;
        } else
                priv->rx_csum_enable = 0;

        priv->vlgrp = NULL;

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
                dev->vlan_rx_register = gfar_vlan_rx_register;

                dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

                priv->vlan_enable = 1;
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
                priv->extended_hash = 1;
                priv->hash_width = 9;

                priv->hash_regs[0] = &priv->regs->igaddr0;
                priv->hash_regs[1] = &priv->regs->igaddr1;
                priv->hash_regs[2] = &priv->regs->igaddr2;
                priv->hash_regs[3] = &priv->regs->igaddr3;
                priv->hash_regs[4] = &priv->regs->igaddr4;
                priv->hash_regs[5] = &priv->regs->igaddr5;
                priv->hash_regs[6] = &priv->regs->igaddr6;
                priv->hash_regs[7] = &priv->regs->igaddr7;
                priv->hash_regs[8] = &priv->regs->gaddr0;
                priv->hash_regs[9] = &priv->regs->gaddr1;
                priv->hash_regs[10] = &priv->regs->gaddr2;
                priv->hash_regs[11] = &priv->regs->gaddr3;
                priv->hash_regs[12] = &priv->regs->gaddr4;
                priv->hash_regs[13] = &priv->regs->gaddr5;
                priv->hash_regs[14] = &priv->regs->gaddr6;
                priv->hash_regs[15] = &priv->regs->gaddr7;

        } else {
                priv->extended_hash = 0;
                priv->hash_width = 8;

                priv->hash_regs[0] = &priv->regs->gaddr0;
                priv->hash_regs[1] = &priv->regs->gaddr1;
                priv->hash_regs[2] = &priv->regs->gaddr2;
                priv->hash_regs[3] = &priv->regs->gaddr3;
                priv->hash_regs[4] = &priv->regs->gaddr4;
                priv->hash_regs[5] = &priv->regs->gaddr5;
                priv->hash_regs[6] = &priv->regs->gaddr6;
                priv->hash_regs[7] = &priv->regs->gaddr7;
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
                priv->padding = DEFAULT_PADDING;
        else
                priv->padding = 0;

        if (dev->features & NETIF_F_IP_CSUM)
                dev->hard_header_len += GMAC_FCB_LEN;

        priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
        priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
        priv->rx_ring_size = DEFAULT_RX_RING_SIZE;

        priv->txcoalescing = DEFAULT_TX_COALESCE;
        priv->txcount = DEFAULT_TXCOUNT;
        priv->txtime = DEFAULT_TXTIME;
        priv->rxcoalescing = DEFAULT_RX_COALESCE;
        priv->rxcount = DEFAULT_RXCOUNT;
        priv->rxtime = DEFAULT_RXTIME;

        /* Enable most messages by default */
        priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;

        err = register_netdev(dev);

        if (err) {
                printk(KERN_ERR "%s: Cannot register net device, aborting.\n",
                       dev->name);
                goto register_fail;
        }

        /* Create all the sysfs files */
        gfar_init_sysfs(dev);

        /* Print out the device info */
        printk(KERN_INFO DEVICE_NAME "%s\n",
               dev->name, print_mac(mac, dev->dev_addr));

        /* Even more device info helps when determining which kernel */
        /* provided which set of benchmarks. */
#ifdef CONFIG_GFAR_NAPI
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
#else
        printk(KERN_INFO "%s: Running with NAPI disabled\n", dev->name);
#endif
        printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n",
               dev->name, priv->rx_ring_size, priv->tx_ring_size);

        return 0;

register_fail:
        iounmap(priv->regs);
regs_fail:
        free_netdev(dev);
        return err;
}
static int gfar_remove(struct platform_device *pdev)
{
        struct net_device *dev = platform_get_drvdata(pdev);
        struct gfar_private *priv = netdev_priv(dev);

        platform_set_drvdata(pdev, NULL);

        iounmap(priv->regs);
        free_netdev(dev);

        return 0;
}
/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        u32 ecntrl = gfar_read(&priv->regs->ecntrl);

        if (ecntrl & ECNTRL_SGMII_MODE)
                return PHY_INTERFACE_MODE_SGMII;

        if (ecntrl & ECNTRL_TBI_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MODE)
                        return PHY_INTERFACE_MODE_RTBI;
                else
                        return PHY_INTERFACE_MODE_TBI;
        }

        if (ecntrl & ECNTRL_REDUCED_MODE) {
                if (ecntrl & ECNTRL_REDUCED_MII_MODE)
                        return PHY_INTERFACE_MODE_RMII;
                else {
                        phy_interface_t interface = priv->einfo->interface;

                        /*
                         * This isn't autodetected right now, so it must
                         * be set by the device tree or platform code.
                         */
                        if (interface == PHY_INTERFACE_MODE_RGMII_ID)
                                return PHY_INTERFACE_MODE_RGMII_ID;

                        return PHY_INTERFACE_MODE_RGMII;
                }
        }

        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
                return PHY_INTERFACE_MODE_GMII;

        return PHY_INTERFACE_MODE_MII;
}
/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        uint gigabit_support =
                priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        struct phy_device *phydev;
        char phy_id[BUS_ID_SIZE];
        phy_interface_t interface;

        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;

        snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);

        interface = gfar_get_interface(dev);

        phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);

        if (interface == PHY_INTERFACE_MODE_SGMII)
                gfar_configure_serdes(dev);

        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
                return PTR_ERR(phydev);
        }

        /* Remove any features not supported by the controller */
        phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
        phydev->advertising = phydev->supported;

        priv->phydev = phydev;

        return 0;
}
static void gfar_configure_serdes(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_mii __iomem *regs =
                        (void __iomem *)&priv->regs->gfar_mii_regs;

        /* Initialise TBI i/f to communicate with serdes (lynx phy) */

        /* Single clk mode, mii mode off (for serdes communication) */
        gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);

        /* Supported pause and full-duplex, no half-duplex */
        gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
                        ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
                        ADVERTISE_1000XPSE_ASYM);

        /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
        gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
                        BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
}
static void init_registers(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR);

        /* Initialize IMASK */
        gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR);

        /* Init hash registers to zero */
        gfar_write(&priv->regs->igaddr0, 0);
        gfar_write(&priv->regs->igaddr1, 0);
        gfar_write(&priv->regs->igaddr2, 0);
        gfar_write(&priv->regs->igaddr3, 0);
        gfar_write(&priv->regs->igaddr4, 0);
        gfar_write(&priv->regs->igaddr5, 0);
        gfar_write(&priv->regs->igaddr6, 0);
        gfar_write(&priv->regs->igaddr7, 0);

        gfar_write(&priv->regs->gaddr0, 0);
        gfar_write(&priv->regs->gaddr1, 0);
        gfar_write(&priv->regs->gaddr2, 0);
        gfar_write(&priv->regs->gaddr3, 0);
        gfar_write(&priv->regs->gaddr4, 0);
        gfar_write(&priv->regs->gaddr5, 0);
        gfar_write(&priv->regs->gaddr6, 0);
        gfar_write(&priv->regs->gaddr7, 0);

        /* Zero out the rmon mib registers if it has them */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
                memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib));

                /* Mask off the CAM interrupts */
                gfar_write(&priv->regs->rmon.cam1, 0xffffffff);
                gfar_write(&priv->regs->rmon.cam2, 0xffffffff);
        }

        /* Initialize the max receive buffer length */
        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);

        /* Initialize the Minimum Frame Length Register */
        gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);

        /* Assign the TBI an address which won't conflict with the PHYs */
        gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
}
/* Halt the receive and transmit queues */
void gfar_halt(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Mask all interrupts */
        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Clear all interrupts */
        gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

        /* Stop the DMA, and wait for it to stop */
        tempval = gfar_read(&priv->regs->dmactrl);
        if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
            != (DMACTRL_GRS | DMACTRL_GTS)) {
                tempval |= (DMACTRL_GRS | DMACTRL_GTS);
                gfar_write(&priv->regs->dmactrl, tempval);

                while (!(gfar_read(&priv->regs->ievent) &
                         (IEVENT_GRSC | IEVENT_GTSC)))
                        cpu_relax();
        }

        /* Disable Rx and Tx */
        tempval = gfar_read(&regs->maccfg1);
        tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);
}
void stop_gfar(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        unsigned long flags;

        phy_stop(priv->phydev);

        /* Lock it down */
        spin_lock_irqsave(&priv->txlock, flags);
        spin_lock(&priv->rxlock);

        gfar_halt(dev);

        spin_unlock(&priv->rxlock);
        spin_unlock_irqrestore(&priv->txlock, flags);

        /* Free the IRQs */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                free_irq(priv->interruptError, dev);
                free_irq(priv->interruptTransmit, dev);
                free_irq(priv->interruptReceive, dev);
        } else {
                free_irq(priv->interruptTransmit, dev);
        }

        free_skb_resources(priv);

        dma_free_coherent(NULL,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));
}
/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff */
static void free_skb_resources(struct gfar_private *priv)
{
        struct rxbd8 *rxbdp;
        struct txbd8 *txbdp;
        int i;

        /* Go through all the buffer descriptors and free their data buffers */
        txbdp = priv->tx_bd_base;

        for (i = 0; i < priv->tx_ring_size; i++) {

                if (priv->tx_skbuff[i]) {
                        dma_unmap_single(NULL, txbdp->bufPtr,
                                        txbdp->length,
                                        DMA_TO_DEVICE);
                        dev_kfree_skb_any(priv->tx_skbuff[i]);
                        priv->tx_skbuff[i] = NULL;
                }

                txbdp++;
        }

        kfree(priv->tx_skbuff);

        rxbdp = priv->rx_bd_base;

        /* rx_skbuff is not guaranteed to be allocated, so only
         * free it and its contents if it is allocated */
        if(priv->rx_skbuff != NULL) {
                for (i = 0; i < priv->rx_ring_size; i++) {
                        if (priv->rx_skbuff[i]) {
                                dma_unmap_single(NULL, rxbdp->bufPtr,
                                                priv->rx_buffer_size,
                                                DMA_FROM_DEVICE);

                                dev_kfree_skb_any(priv->rx_skbuff[i]);
                                priv->rx_skbuff[i] = NULL;
                        }

                        rxbdp->status = 0;
                        rxbdp->length = 0;
                        rxbdp->bufPtr = 0;

                        rxbdp++;
                }

                kfree(priv->rx_skbuff);
        }
}
void gfar_start(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        /* Enable Rx and Tx in MACCFG1 */
        tempval = gfar_read(&regs->maccfg1);
        tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
        gfar_write(&regs->maccfg1, tempval);

        /* Initialize DMACTRL to have WWR and WOP */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval |= DMACTRL_INIT_SETTINGS;
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Make sure we aren't stopped */
        tempval = gfar_read(&priv->regs->dmactrl);
        tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
        gfar_write(&priv->regs->dmactrl, tempval);

        /* Clear THLT/RHLT, so that the DMA starts polling now */
        gfar_write(&regs->tstat, TSTAT_CLEAR_THALT);
        gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT);

        /* Unmask the interrupts we look for */
        gfar_write(&regs->imask, IMASK_DEFAULT);
}
/* Bring the controller up and running */
int startup_gfar(struct net_device *dev)
{
        struct txbd8 *txbdp;
        struct rxbd8 *rxbdp;
        dma_addr_t addr;
        unsigned long vaddr;
        int i;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        int err = 0;
        u32 rctrl = 0;
        u32 attrs = 0;

        gfar_write(&regs->imask, IMASK_INIT_CLEAR);

        /* Allocate memory for the buffer descriptors */
        vaddr = (unsigned long) dma_alloc_coherent(NULL,
                        sizeof (struct txbd8) * priv->tx_ring_size +
                        sizeof (struct rxbd8) * priv->rx_ring_size,
                        &addr, GFP_KERNEL);

        if (vaddr == 0) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
                               dev->name);
                return -ENOMEM;
        }

        priv->tx_bd_base = (struct txbd8 *) vaddr;

        /* enet DMA only understands physical addresses */
        gfar_write(&regs->tbase0, addr);

        /* Start the rx descriptor ring where the tx ring leaves off */
        addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
        vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
        priv->rx_bd_base = (struct rxbd8 *) vaddr;
        gfar_write(&regs->rbase0, addr);

        /* Setup the skbuff rings */
        priv->tx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->tx_ring_size, GFP_KERNEL);

        if (NULL == priv->tx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
                               dev->name);
                err = -ENOMEM;
                goto tx_skb_fail;
        }

        for (i = 0; i < priv->tx_ring_size; i++)
                priv->tx_skbuff[i] = NULL;

        priv->rx_skbuff =
            (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
                                        priv->rx_ring_size, GFP_KERNEL);

        if (NULL == priv->rx_skbuff) {
                if (netif_msg_ifup(priv))
                        printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
                               dev->name);
                err = -ENOMEM;
                goto rx_skb_fail;
        }

        for (i = 0; i < priv->rx_ring_size; i++)
                priv->rx_skbuff[i] = NULL;

        /* Initialize some variables in our dev structure */
        priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
        priv->cur_rx = priv->rx_bd_base;
        priv->skb_curtx = priv->skb_dirtytx = 0;
        priv->skb_currx = 0;

        /* Initialize Transmit Descriptor Ring */
        txbdp = priv->tx_bd_base;
        for (i = 0; i < priv->tx_ring_size; i++) {
                txbdp->status = 0;
                txbdp->length = 0;
                txbdp->bufPtr = 0;
                txbdp++;
        }

        /* Set the last descriptor in the ring to indicate wrap */
        txbdp--;
        txbdp->status |= TXBD_WRAP;

        rxbdp = priv->rx_bd_base;
        for (i = 0; i < priv->rx_ring_size; i++) {
                struct sk_buff *skb = NULL;

                rxbdp->status = 0;

                skb = gfar_new_skb(dev, rxbdp);

                priv->rx_skbuff[i] = skb;

                rxbdp++;
        }

        /* Set the last descriptor in the ring to wrap */
        rxbdp--;
        rxbdp->status |= RXBD_WRAP;

        /* If the device has multiple interrupts, register for
         * them.  Otherwise, only register for the one */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                /* Install our interrupt handlers for Error,
                 * Transmit, and Receive */
                if (request_irq(priv->interruptError, gfar_error,
                                0, "enet_error", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                       dev->name, priv->interruptError);

                        err = -1;
                        goto err_irq_fail;
                }

                if (request_irq(priv->interruptTransmit, gfar_transmit,
                                0, "enet_tx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                       dev->name, priv->interruptTransmit);

                        err = -1;

                        goto tx_irq_fail;
                }

                if (request_irq(priv->interruptReceive, gfar_receive,
                                0, "enet_rx", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n",
                                       dev->name, priv->interruptReceive);

                        err = -1;
                        goto rx_irq_fail;
                }
        } else {
                if (request_irq(priv->interruptTransmit, gfar_interrupt,
                                0, "gfar_interrupt", dev) < 0) {
                        if (netif_msg_intr(priv))
                                printk(KERN_ERR "%s: Can't get IRQ %d\n",
                                       dev->name, priv->interruptError);

                        err = -1;
                        goto err_irq_fail;
                }
        }

        phy_start(priv->phydev);

        /* Configure the coalescing support */
        if (priv->txcoalescing)
                gfar_write(&regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&regs->txic, 0);

        if (priv->rxcoalescing)
                gfar_write(&regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&regs->rxic, 0);

        if (priv->rx_csum_enable)
                rctrl |= RCTRL_CHECKSUMMING;

        if (priv->extended_hash) {
                rctrl |= RCTRL_EXTHASH;

                gfar_clear_exact_match(dev);
                rctrl |= RCTRL_EMEN;
        }

        if (priv->vlan_enable)
                rctrl |= RCTRL_VLAN;

        if (priv->padding) {
                rctrl &= ~RCTRL_PAL_MASK;
                rctrl |= RCTRL_PADDING(priv->padding);
        }

        /* Init rctrl based on our settings */
        gfar_write(&priv->regs->rctrl, rctrl);

        if (dev->features & NETIF_F_IP_CSUM)
                gfar_write(&priv->regs->tctrl, TCTRL_INIT_CSUM);

        /* Set the extraction length and index */
        attrs = ATTRELI_EL(priv->rx_stash_size) |
                ATTRELI_EI(priv->rx_stash_index);

        gfar_write(&priv->regs->attreli, attrs);

        /* Start with defaults, and add stashing or locking
         * depending on the appropriate variables */
        attrs = ATTR_INIT_SETTINGS;

        if (priv->bd_stash_en)
                attrs |= ATTR_BDSTASH;

        if (priv->rx_stash_size != 0)
                attrs |= ATTR_BUFSTASH;

        gfar_write(&priv->regs->attr, attrs);

        gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold);
        gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
        gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);

        /* Start the controller */
        gfar_start(dev);

        return 0;

rx_irq_fail:
        free_irq(priv->interruptTransmit, dev);
tx_irq_fail:
        free_irq(priv->interruptError, dev);
err_irq_fail:
rx_skb_fail:
        free_skb_resources(priv);
tx_skb_fail:
        dma_free_coherent(NULL,
                        sizeof(struct txbd8)*priv->tx_ring_size
                        + sizeof(struct rxbd8)*priv->rx_ring_size,
                        priv->tx_bd_base,
                        gfar_read(&regs->tbase0));

        return err;
}
/* Called when something needs to use the ethernet device */
/* Returns 0 for success. */
static int gfar_enet_open(struct net_device *dev)
{
#ifdef CONFIG_GFAR_NAPI
        struct gfar_private *priv = netdev_priv(dev);
#endif
        int err;

#ifdef CONFIG_GFAR_NAPI
        napi_enable(&priv->napi);
#endif

        /* Initialize a bunch of registers */
        init_registers(dev);

        gfar_set_mac_address(dev);

        err = init_phy(dev);

        if(err) {
#ifdef CONFIG_GFAR_NAPI
                napi_disable(&priv->napi);
#endif
                return err;
        }

        err = startup_gfar(dev);
        if (err) {
#ifdef CONFIG_GFAR_NAPI
                napi_disable(&priv->napi);
#endif
                return err;
        }

        netif_start_queue(dev);

        return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb, struct txbd8 *bdp)
{
        struct txfcb *fcb = (struct txfcb *)skb_push (skb, GMAC_FCB_LEN);

        memset(fcb, 0, GMAC_FCB_LEN);

        return fcb;
}
static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
{
        u8 flags = 0;

        /* If we're here, it's an IP packet with a TCP or UDP
         * payload.  We set it to checksum, using a pseudo-header
         * we provide
         */
        flags = TXFCB_DEFAULT;

        /* Tell the controller what the protocol is */
        /* And provide the already calculated phcs */
        if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
                flags |= TXFCB_UDP;
                fcb->phcs = udp_hdr(skb)->check;
        } else
                fcb->phcs = tcp_hdr(skb)->check;

        /* l3os is the distance between the start of the
         * frame (skb->data) and the start of the IP hdr.
         * l4os is the distance between the start of the
         * l3 hdr and the l4 hdr */
        fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
        fcb->l4os = skb_network_header_len(skb);

        fcb->flags = flags;
}

void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
        fcb->flags |= TXFCB_VLN;
        fcb->vlctl = vlan_tx_tag_get(skb);
}
/* This is called by the kernel when a frame is ready for transmission. */
/* It is pointed to by the dev->hard_start_xmit function pointer */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct txfcb *fcb = NULL;
        struct txbd8 *txbdp;
        u16 status;
        unsigned long flags;

        /* Update transmit stats */
        dev->stats.tx_bytes += skb->len;

        /* Lock priv now */
        spin_lock_irqsave(&priv->txlock, flags);

        /* Point at the first free tx descriptor */
        txbdp = priv->cur_tx;

        /* Clear all but the WRAP status flags */
        status = txbdp->status & TXBD_WRAP;

        /* Set up checksumming */
        if (likely((dev->features & NETIF_F_IP_CSUM)
                        && (CHECKSUM_PARTIAL == skb->ip_summed))) {
                fcb = gfar_add_fcb(skb, txbdp);
                status |= TXBD_TOE;
                gfar_tx_checksum(skb, fcb);
        }

        if (priv->vlan_enable &&
            unlikely(priv->vlgrp && vlan_tx_tag_present(skb))) {
                if (unlikely(NULL == fcb)) {
                        fcb = gfar_add_fcb(skb, txbdp);
                        status |= TXBD_TOE;
                }

                gfar_tx_vlan(skb, fcb);
        }

        /* Set buffer length and pointer */
        txbdp->length = skb->len;
        txbdp->bufPtr = dma_map_single(NULL, skb->data,
                        skb->len, DMA_TO_DEVICE);

        /* Save the skb pointer so we can free it later */
        priv->tx_skbuff[priv->skb_curtx] = skb;

        /* Update the current skb pointer (wrapping if this was the last) */
        priv->skb_curtx =
            (priv->skb_curtx + 1) & TX_RING_MOD_MASK(priv->tx_ring_size);

        /* Flag the BD as interrupt-causing */
        status |= TXBD_INTERRUPT;

        /* Flag the BD as ready to go, last in frame, and */
        /* in need of CRC */
        status |= (TXBD_READY | TXBD_LAST | TXBD_CRC);

        dev->trans_start = jiffies;

        /* The powerpc-specific eieio() is used, as wmb() has too strong
         * semantics (it requires synchronization between cacheable and
         * uncacheable mappings, which eieio doesn't provide and which we
         * don't need), thus requiring a more expensive sync instruction.  At
         * some point, the set of architecture-independent barrier functions
         * should be expanded to include weaker barriers.
         */
        eieio();
        txbdp->status = status;

        /* If this was the last BD in the ring, the next one */
        /* is at the beginning of the ring */
        if (txbdp->status & TXBD_WRAP)
                txbdp = priv->tx_bd_base;
        else
                txbdp++;

        /* If the next BD still needs to be cleaned up, then the bds
           are full.  We need to tell the kernel to stop sending us stuff. */
        if (txbdp == priv->dirty_tx) {
                netif_stop_queue(dev);

                dev->stats.tx_fifo_errors++;
        }

        /* Update the current txbd to the next one */
        priv->cur_tx = txbdp;

        /* Tell the DMA to go go go */
        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);

        /* Unlock priv */
        spin_unlock_irqrestore(&priv->txlock, flags);

        return 0;
}
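/*
 * Ordering sketch (editorial, not part of the driver): the descriptor
 * hand-off above relies on a producer-side barrier so the controller
 * never sees TXBD_READY before the buffer pointer and length are
 * visible:
 *
 *      txbdp->length = skb->len;
 *      txbdp->bufPtr = dma_map_single(...);
 *      eieio();                // order the writes above before ...
 *      txbdp->status = status; // ... the READY bit the hardware polls
 *
 * gfar_new_skb() uses the same eieio() pattern before setting
 * RXBD_EMPTY on the receive side.
 */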
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

#ifdef CONFIG_GFAR_NAPI
        napi_disable(&priv->napi);
#endif

        stop_gfar(dev);

        /* Disconnect from the PHY */
        phy_disconnect(priv->phydev);
        priv->phydev = NULL;

        netif_stop_queue(dev);

        return 0;
}

/* Changes the mac address if the controller is not running. */
int gfar_set_mac_address(struct net_device *dev)
{
        gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

        return 0;
}
/* Enables and disables VLAN insertion/extraction */
static void gfar_vlan_rx_register(struct net_device *dev,
                struct vlan_group *grp)
{
        struct gfar_private *priv = netdev_priv(dev);
        unsigned long flags;
        u32 tempval;

        spin_lock_irqsave(&priv->rxlock, flags);

        priv->vlgrp = grp;

        if (grp) {
                /* Enable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval |= TCTRL_VLINS;

                gfar_write(&priv->regs->tctrl, tempval);

                /* Enable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval |= RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        } else {
                /* Disable VLAN tag insertion */
                tempval = gfar_read(&priv->regs->tctrl);
                tempval &= ~TCTRL_VLINS;
                gfar_write(&priv->regs->tctrl, tempval);

                /* Disable VLAN tag extraction */
                tempval = gfar_read(&priv->regs->rctrl);
                tempval &= ~RCTRL_VLEX;
                gfar_write(&priv->regs->rctrl, tempval);
        }

        spin_unlock_irqrestore(&priv->rxlock, flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
        int tempsize, tempval;
        struct gfar_private *priv = netdev_priv(dev);
        int oldsize = priv->rx_buffer_size;
        int frame_size = new_mtu + ETH_HLEN;

        if (priv->vlan_enable)
                frame_size += VLAN_ETH_HLEN;

        if (gfar_uses_fcb(priv))
                frame_size += GMAC_FCB_LEN;

        frame_size += priv->padding;

        if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
                if (netif_msg_drv(priv))
                        printk(KERN_ERR "%s: Invalid MTU setting\n",
                               dev->name);
                return -EINVAL;
        }

        tempsize =
            (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
            INCREMENTAL_BUFFER_SIZE;

        /* Only stop and start the controller if it isn't already
         * stopped, and we changed something */
        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                stop_gfar(dev);

        priv->rx_buffer_size = tempsize;

        dev->mtu = new_mtu;

        gfar_write(&priv->regs->mrblr, priv->rx_buffer_size);
        gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size);

        /* If the mtu is larger than the max size for standard
         * ethernet frames (ie, a jumbo frame), then set maccfg2
         * to allow huge frames, and to check the length */
        tempval = gfar_read(&priv->regs->maccfg2);

        if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
                tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
        else
                tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

        gfar_write(&priv->regs->maccfg2, tempval);

        if ((oldsize != tempsize) && (dev->flags & IFF_UP))
                startup_gfar(dev);

        return 0;
}
/* gfar_timeout gets called when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem. */
static void gfar_timeout(struct net_device *dev)
{
        dev->stats.tx_errors++;

        if (dev->flags & IFF_UP) {
                stop_gfar(dev);
                startup_gfar(dev);
        }

        netif_schedule(dev);
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
        struct txbd8 *bdp;

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);

        /* Lock priv */
        spin_lock(&priv->txlock);
        bdp = priv->dirty_tx;
        while ((bdp->status & TXBD_READY) == 0) {
                /* If dirty_tx and cur_tx are the same, then either the */
                /* ring is empty or full now (it could only be full in the beginning, */
                /* obviously).  If it is empty, we are done. */
                if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
                        break;

                dev->stats.tx_packets++;

                /* Deferred means some collisions occurred during transmit, */
                /* but we eventually sent the packet. */
                if (bdp->status & TXBD_DEF)
                        dev->stats.collisions++;

                /* Free the sk buffer associated with this TxBD */
                dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);

                priv->tx_skbuff[priv->skb_dirtytx] = NULL;
                priv->skb_dirtytx =
                    (priv->skb_dirtytx +
                     1) & TX_RING_MOD_MASK(priv->tx_ring_size);

                /* update bdp to point at next bd in the ring (wrapping if necessary) */
                if (bdp->status & TXBD_WRAP)
                        bdp = priv->tx_bd_base;
                else
                        bdp++;

                /* Move dirty_tx to be the next bd */
                priv->dirty_tx = bdp;

                /* We freed a buffer, so now we can restart transmission */
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
        } /* while ((bdp->status & TXBD_READY) == 0) */

        /* If we are coalescing the interrupts, reset the timer */
        /* Otherwise, clear it */
        if (priv->txcoalescing)
                gfar_write(&priv->regs->txic,
                           mk_ic_value(priv->txcount, priv->txtime));
        else
                gfar_write(&priv->regs->txic, 0);

        spin_unlock(&priv->txlock);

        return IRQ_HANDLED;
}
struct sk_buff * gfar_new_skb(struct net_device *dev, struct rxbd8 *bdp)
{
        unsigned int alignamount;
        struct gfar_private *priv = netdev_priv(dev);
        struct sk_buff *skb = NULL;
        unsigned int timeout = SKB_ALLOC_TIMEOUT;

        /* We have to allocate the skb, so keep trying till we succeed */
        while ((!skb) && timeout--)
                skb = dev_alloc_skb(priv->rx_buffer_size + RXBUF_ALIGNMENT);

        if (NULL == skb)
                return NULL;

        alignamount = RXBUF_ALIGNMENT -
                (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1));

        /* We need the data buffer to be aligned properly.  We will reserve
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, alignamount);

        bdp->bufPtr = dma_map_single(NULL, skb->data,
                        priv->rx_buffer_size, DMA_FROM_DEVICE);

        bdp->length = 0;

        /* Mark the buffer empty */
        eieio();
        bdp->status |= (RXBD_EMPTY | RXBD_INTERRUPT);

        return skb;
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct net_device_stats *stats = &dev->stats;
        struct gfar_extra_stats *estats = &priv->extra_stats;

        /* If the packet was truncated, none of the other errors
         * matter */
        if (status & RXBD_TRUNCATED) {
                stats->rx_length_errors++;

                estats->rx_trunc++;

                return;
        }
        /* Count the errors, if there were any */
        if (status & (RXBD_LARGE | RXBD_SHORT)) {
                stats->rx_length_errors++;

                if (status & RXBD_LARGE)
                        estats->rx_large++;
                else
                        estats->rx_short++;
        }
        if (status & RXBD_NONOCTET) {
                stats->rx_frame_errors++;
                estats->rx_nonoctet++;
        }
        if (status & RXBD_CRCERR) {
                estats->rx_crcerr++;
                stats->rx_crc_errors++;
        }
        if (status & RXBD_OVERRUN) {
                estats->rx_overrun++;
                stats->rx_crc_errors++;
        }
}
irqreturn_t gfar_receive(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *) dev_id;
        struct gfar_private *priv = netdev_priv(dev);
#ifdef CONFIG_GFAR_NAPI
        u32 tempval;
#else
        unsigned long flags;
#endif

        /* Clear IEVENT, so rx interrupt isn't called again
         * because of this interrupt */
        gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);

        /* support NAPI */
#ifdef CONFIG_GFAR_NAPI
        if (netif_rx_schedule_prep(dev, &priv->napi)) {
                tempval = gfar_read(&priv->regs->imask);
                tempval &= IMASK_RX_DISABLED;
                gfar_write(&priv->regs->imask, tempval);

                __netif_rx_schedule(dev, &priv->napi);
        } else {
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: receive called twice (%x)[%x]\n",
                               dev->name, gfar_read(&priv->regs->ievent),
                               gfar_read(&priv->regs->imask));
        }
#else

        spin_lock_irqsave(&priv->rxlock, flags);
        gfar_clean_rx_ring(dev, priv->rx_ring_size);

        /* If we are coalescing interrupts, update the timer */
        /* Otherwise, clear it */
        if (priv->rxcoalescing)
                gfar_write(&priv->regs->rxic,
                           mk_ic_value(priv->rxcount, priv->rxtime));
        else
                gfar_write(&priv->regs->rxic, 0);

        spin_unlock_irqrestore(&priv->rxlock, flags);
#endif

        return IRQ_HANDLED;
}
static inline int gfar_rx_vlan(struct sk_buff *skb,
                struct vlan_group *vlgrp, unsigned short vlctl)
{
#ifdef CONFIG_GFAR_NAPI
        return vlan_hwaccel_receive_skb(skb, vlgrp, vlctl);
#else
        return vlan_hwaccel_rx(skb, vlgrp, vlctl);
#endif
}

static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
        /* If valid headers were found, and valid sums
         * were verified, then we tell the kernel that no
         * checksumming is necessary.  Otherwise, it is */
        if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb->ip_summed = CHECKSUM_NONE;
}

static inline struct rxfcb *gfar_get_fcb(struct sk_buff *skb)
{
        struct rxfcb *fcb = (struct rxfcb *)skb->data;

        /* Remove the FCB from the skb */
        skb_pull(skb, GMAC_FCB_LEN);

        return fcb;
}
/* gfar_process_frame() -- handle one incoming packet if skb
 * isn't NULL. */
static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
                int length)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct rxfcb *fcb = NULL;

        if (NULL == skb) {
                if (netif_msg_rx_err(priv))
                        printk(KERN_WARNING "%s: Missing skb!\n", dev->name);
                dev->stats.rx_dropped++;
                priv->extra_stats.rx_skbmissing++;
        } else {
                int ret;

                /* Prep the skb for the packet */
                skb_put(skb, length);

                /* Grab the FCB if there is one */
                if (gfar_uses_fcb(priv))
                        fcb = gfar_get_fcb(skb);

                /* Remove the padded bytes, if there are any */
                if (priv->padding)
                        skb_pull(skb, priv->padding);

                if (priv->rx_csum_enable)
                        gfar_rx_checksum(skb, fcb);

                /* Tell the skb what kind of packet this is */
                skb->protocol = eth_type_trans(skb, dev);

                /* Send the packet up the stack */
                if (unlikely(priv->vlgrp && (fcb->flags & RXFCB_VLN)))
                        ret = gfar_rx_vlan(skb, priv->vlgrp, fcb->vlctl);
                else
                        ret = RECEIVE(skb);

                if (NET_RX_DROP == ret)
                        priv->extra_stats.kernel_dropped++;
        }

        return 0;
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 *   until the budget/quota has been reached. Returns the number
 *   of frames handled
 */
int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
{
        struct rxbd8 *bdp;
        struct sk_buff *skb;
        u16 pkt_len;
        int howmany = 0;
        struct gfar_private *priv = netdev_priv(dev);

        /* Get the first full descriptor */
        bdp = priv->cur_rx;

        while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
                rmb();
                skb = priv->rx_skbuff[priv->skb_currx];

                if (!(bdp->status &
                      (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
                       | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
                        /* Increment the number of packets */
                        dev->stats.rx_packets++;
                        howmany++;

                        /* Remove the FCS from the packet length */
                        pkt_len = bdp->length - 4;

                        gfar_process_frame(dev, skb, pkt_len);

                        dev->stats.rx_bytes += pkt_len;
                } else {
                        count_errors(bdp->status, dev);

                        if (skb)
                                dev_kfree_skb_any(skb);

                        priv->rx_skbuff[priv->skb_currx] = NULL;
                }

                dev->last_rx = jiffies;

                /* Clear the status flags for this buffer */
                bdp->status &= ~RXBD_STATS;

                /* Add another skb for the future */
                skb = gfar_new_skb(dev, bdp);
                priv->rx_skbuff[priv->skb_currx] = skb;

                /* Update to the next pointer */
                if (bdp->status & RXBD_WRAP)
                        bdp = priv->rx_bd_base;
                else
                        bdp++;

                /* update to point at the next skb */
                priv->skb_currx =
                    (priv->skb_currx +
                     1) & RX_RING_MOD_MASK(priv->rx_ring_size);
        }

        /* Update the current rxbd pointer to be the next one */
        priv->cur_rx = bdp;

        return howmany;
}
#ifdef CONFIG_GFAR_NAPI
static int gfar_poll(struct napi_struct *napi, int budget)
{
        struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
        struct net_device *dev = priv->dev;
        int howmany;

        howmany = gfar_clean_rx_ring(dev, budget);

        if (howmany < budget) {
                netif_rx_complete(dev, napi);

                /* Clear the halt bit in RSTAT */
                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);

                gfar_write(&priv->regs->imask, IMASK_DEFAULT);

                /* If we are coalescing interrupts, update the timer */
                /* Otherwise, clear it */
                if (priv->rxcoalescing)
                        gfar_write(&priv->regs->rxic,
                                   mk_ic_value(priv->rxcount, priv->rxtime));
                else
                        gfar_write(&priv->regs->rxic, 0);
        }

        return howmany;
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);

        /* If the device has multiple interrupts, run tx/rx */
        if (priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
                disable_irq(priv->interruptTransmit);
                disable_irq(priv->interruptReceive);
                disable_irq(priv->interruptError);
                gfar_interrupt(priv->interruptTransmit, dev);
                enable_irq(priv->interruptError);
                enable_irq(priv->interruptReceive);
                enable_irq(priv->interruptTransmit);
        } else {
                disable_irq(priv->interruptTransmit);
                gfar_interrupt(priv->interruptTransmit, dev);
                enable_irq(priv->interruptTransmit);
        }
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct gfar_private *priv = netdev_priv(dev);

        /* Save ievent for future reference */
        u32 events = gfar_read(&priv->regs->ievent);

        /* Check for reception */
        if (events & IEVENT_RX_MASK)
                gfar_receive(irq, dev_id);

        /* Check for transmit completion */
        if (events & IEVENT_TX_MASK)
                gfar_transmit(irq, dev_id);

        /* Check for errors */
        if (events & IEVENT_ERR_MASK)
                gfar_error(irq, dev_id);

        return IRQ_HANDLED;
}
/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        unsigned long flags;
        struct phy_device *phydev = priv->phydev;
        int new_state = 0;

        spin_lock_irqsave(&priv->txlock, flags);
        if (phydev->link) {
                u32 tempval = gfar_read(&regs->maccfg2);
                u32 ecntrl = gfar_read(&regs->ecntrl);

                /* Now we make sure that we can be in full duplex mode.
                 * If not, we operate in half-duplex mode. */
                if (phydev->duplex != priv->oldduplex) {
                        new_state = 1;
                        if (!(phydev->duplex))
                                tempval &= ~(MACCFG2_FULL_DUPLEX);
                        else
                                tempval |= MACCFG2_FULL_DUPLEX;

                        priv->oldduplex = phydev->duplex;
                }

                if (phydev->speed != priv->oldspeed) {
                        new_state = 1;
                        switch (phydev->speed) {
                        case 1000:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
                                break;
                        case 100:
                        case 10:
                                tempval =
                                    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

                                /* Reduced mode distinguishes
                                 * between 10 and 100 */
                                if (phydev->speed == SPEED_100)
                                        ecntrl |= ECNTRL_R100;
                                else
                                        ecntrl &= ~(ECNTRL_R100);
                                break;
                        default:
                                if (netif_msg_link(priv))
                                        printk(KERN_WARNING
                                               "%s: Ack!  Speed (%d) is not 10/100/1000!\n",
                                               dev->name, phydev->speed);
                                break;
                        }

                        priv->oldspeed = phydev->speed;
                }

                gfar_write(&regs->maccfg2, tempval);
                gfar_write(&regs->ecntrl, ecntrl);

                if (!priv->oldlink) {
                        new_state = 1;
                        priv->oldlink = 1;
                        netif_schedule(dev);
                }
        } else if (priv->oldlink) {
                new_state = 1;
                priv->oldlink = 0;
                priv->oldspeed = 0;
                priv->oldduplex = -1;
        }

        if (new_state && netif_msg_link(priv))
                phy_print_status(phydev);

        spin_unlock_irqrestore(&priv->txlock, flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to.  Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed) */
static void gfar_set_multi(struct net_device *dev)
{
        struct dev_mc_list *mc_ptr;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->regs;
        u32 tempval;

        if(dev->flags & IFF_PROMISC) {
                /* Set RCTRL to PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval |= RCTRL_PROM;
                gfar_write(&regs->rctrl, tempval);
        } else {
                /* Set RCTRL to not PROM */
                tempval = gfar_read(&regs->rctrl);
                tempval &= ~(RCTRL_PROM);
                gfar_write(&regs->rctrl, tempval);
        }

        if(dev->flags & IFF_ALLMULTI) {
                /* Set the hash to rx all multicast frames */
                gfar_write(&regs->igaddr0, 0xffffffff);
                gfar_write(&regs->igaddr1, 0xffffffff);
                gfar_write(&regs->igaddr2, 0xffffffff);
                gfar_write(&regs->igaddr3, 0xffffffff);
                gfar_write(&regs->igaddr4, 0xffffffff);
                gfar_write(&regs->igaddr5, 0xffffffff);
                gfar_write(&regs->igaddr6, 0xffffffff);
                gfar_write(&regs->igaddr7, 0xffffffff);
                gfar_write(&regs->gaddr0, 0xffffffff);
                gfar_write(&regs->gaddr1, 0xffffffff);
                gfar_write(&regs->gaddr2, 0xffffffff);
                gfar_write(&regs->gaddr3, 0xffffffff);
                gfar_write(&regs->gaddr4, 0xffffffff);
                gfar_write(&regs->gaddr5, 0xffffffff);
                gfar_write(&regs->gaddr6, 0xffffffff);
                gfar_write(&regs->gaddr7, 0xffffffff);
        } else {
                int em_num;
                int idx;

                /* zero out the hash */
                gfar_write(&regs->igaddr0, 0x0);
                gfar_write(&regs->igaddr1, 0x0);
                gfar_write(&regs->igaddr2, 0x0);
                gfar_write(&regs->igaddr3, 0x0);
                gfar_write(&regs->igaddr4, 0x0);
                gfar_write(&regs->igaddr5, 0x0);
                gfar_write(&regs->igaddr6, 0x0);
                gfar_write(&regs->igaddr7, 0x0);
                gfar_write(&regs->gaddr0, 0x0);
                gfar_write(&regs->gaddr1, 0x0);
                gfar_write(&regs->gaddr2, 0x0);
                gfar_write(&regs->gaddr3, 0x0);
                gfar_write(&regs->gaddr4, 0x0);
                gfar_write(&regs->gaddr5, 0x0);
                gfar_write(&regs->gaddr6, 0x0);
                gfar_write(&regs->gaddr7, 0x0);

                /* If we have extended hash tables, we need to
                 * clear the exact match registers to prepare for
                 * setting them */
                if (priv->extended_hash) {
                        em_num = GFAR_EM_NUM + 1;
                        gfar_clear_exact_match(dev);
                        idx = 1;
                } else {
                        idx = 0;
                        em_num = 0;
                }

                if(dev->mc_count == 0)
                        return;

                /* Parse the list, and set the appropriate bits */
                for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
                        if (idx < em_num) {
                                gfar_set_mac_for_addr(dev, idx,
                                                mc_ptr->dmi_addr);
                                idx++;
                        } else
                                gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
                }
        }

        return;
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception */
static void gfar_clear_exact_match(struct net_device *dev)
{
        int idx;
        u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0};

        for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
                gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table.  The table is controlled through 8 32-bit registers:
 * gaddr0-7.  gaddr0's MSB is entry 0, and gaddr7's LSB is
 * entry 255.  This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry. */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
        u32 tempval;
        struct gfar_private *priv = netdev_priv(dev);
        u32 result = ether_crc(MAC_ADDR_LEN, addr);
        int width = priv->hash_width;
        u8 whichbit = (result >> (32 - width)) & 0x1f;
        u8 whichreg = result >> (32 - width + 5);
        u32 value = (1 << (31-whichbit));

        tempval = gfar_read(priv->hash_regs[whichreg]);
        tempval |= value;
        gfar_write(priv->hash_regs[whichreg], tempval);

        return;
}
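/*
 * Worked example (editorial, derived from the arithmetic above): with
 * the 8-bit-wide hash, a CRC whose top byte is 0xd6 (binary 1101 0110)
 * splits into whichreg = 0b110 = 6 and whichbit = 0b10110 = 22, so the
 * code sets machine bit (31 - 22) = 9 of hash_regs[6] -- that is, bit
 * 22 in IBM numbering, where bit 0 is the MSB.  With the 9-bit
 * extended hash, one extra CRC bit selects among the 16 igaddr/gaddr
 * registers instead of 8.
 */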
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
{
        struct gfar_private *priv = netdev_priv(dev);
        int idx;
        char tmpbuf[MAC_ADDR_LEN];
        u32 tempval;
        u32 __iomem *macptr = &priv->regs->macstnaddr1;

        macptr += num*2;

        /* Now copy it into the mac registers backwards, cuz */
        /* little endian is silly */
        for (idx = 0; idx < MAC_ADDR_LEN; idx++)
                tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];

        gfar_write(macptr, *((u32 *) (tmpbuf)));

        tempval = *((u32 *) (tmpbuf + 4));

        gfar_write(macptr+1, tempval);
}
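/*
 * Illustrative example (editorial): for addr 00:04:9f:01:02:03 the
 * loop above builds tmpbuf = {03, 02, 01, 9f, 04, 00}, so on
 * big-endian PowerPC the first register receives 0x0302019f and the
 * upper 16 bits of the second register receive 0x0400 -- the
 * byte-reversed order the MAC station address registers expect.
 */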
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct gfar_private *priv = netdev_priv(dev);

        /* Save ievent for future reference */
        u32 events = gfar_read(&priv->regs->ievent);

        /* Clear IEVENT */
        gfar_write(&priv->regs->ievent, IEVENT_ERR_MASK);

        /* Hmm... */
        if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
                printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
                       dev->name, events, gfar_read(&priv->regs->imask));

        /* Update the error counters */
        if (events & IEVENT_TXE) {
                dev->stats.tx_errors++;

                if (events & IEVENT_LC)
                        dev->stats.tx_window_errors++;
                if (events & IEVENT_CRL)
                        dev->stats.tx_aborted_errors++;
                if (events & IEVENT_XFUN) {
                        if (netif_msg_tx_err(priv))
                                printk(KERN_DEBUG "%s: TX FIFO underrun, "
                                       "packet dropped.\n", dev->name);
                        dev->stats.tx_dropped++;
                        priv->extra_stats.tx_underrun++;

                        /* Reactivate the Tx Queues */
                        gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT);
                }
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
        }
        if (events & IEVENT_BSY) {
                dev->stats.rx_errors++;
                priv->extra_stats.rx_bsy++;

                gfar_receive(irq, dev_id);

#ifndef CONFIG_GFAR_NAPI
                /* Clear the halt bit in RSTAT */
                gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT);
#endif

                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
                               dev->name, gfar_read(&priv->regs->rstat));
        }
        if (events & IEVENT_BABR) {
                dev->stats.rx_errors++;
                priv->extra_stats.rx_babr++;

                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: babbling RX error\n", dev->name);
        }
        if (events & IEVENT_EBERR) {
                priv->extra_stats.eberr++;
                if (netif_msg_rx_err(priv))
                        printk(KERN_DEBUG "%s: bus error\n", dev->name);
        }
        if ((events & IEVENT_RXC) && netif_msg_rx_status(priv))
                printk(KERN_DEBUG "%s: control frame\n", dev->name);

        if (events & IEVENT_BABT) {
                priv->extra_stats.tx_babt++;
                if (netif_msg_tx_err(priv))
                        printk(KERN_DEBUG "%s: babbling TX error\n", dev->name);
        }
        return IRQ_HANDLED;
}
/* Structure for a device driver */
static struct platform_driver gfar_driver = {
        .probe = gfar_probe,
        .remove = gfar_remove,
        .driver = {
                .name = "fsl-gianfar",
        },
};

static int __init gfar_init(void)
{
        int err = gfar_mdio_init();

        if (err)
                return err;

        err = platform_driver_register(&gfar_driver);

        if (err)
                gfar_mdio_exit();

        return err;
}

static void __exit gfar_exit(void)
{
        platform_driver_unregister(&gfar_driver);
        gfar_mdio_exit();
}

module_init(gfar_init);
module_exit(gfar_exit);