/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */
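/* Illustrative sketch only, not part of the driver: the RX flow described
 * above amounts to walking a circular array of buffer descriptors, stopping
 * at the first descriptor the hardware still owns, and using the wrap bit to
 * return to the start of the ring. The helper names below are hypothetical.
 *
 *	struct rxbd8 *bdp = rx_queue->cur_rx;
 *
 *	while (!(bdp->status & RXBD_EMPTY)) {
 *		hand_skb_to_stack(rx_queue, bdp);	// pass the filled skb up
 *		refill_descriptor(rx_queue, bdp);	// new skb, mark EMPTY again
 *		if (bdp->status & RXBD_WRAP)		// last BD in the ring?
 *			bdp = rx_queue->rx_bd_base;
 *		else
 *			bdp++;
 *	}
 */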
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>

#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>

#include <linux/tcp.h>
#include <linux/udp.h>

#include <linux/net_tstamp.h>

#include <asm/mpc85xx.h>
#include <asm/uaccess.h>

#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
#define TX_TIMEOUT      (1*HZ)

const char gfar_driver_version[] = "1.3";

static int gfar_enet_open(struct net_device *dev);
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
static void free_skb_resources(struct gfar_private *priv);
static void gfar_set_multi(struct net_device *dev);
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
static void gfar_configure_serdes(struct net_device *dev);
static int gfar_poll(struct napi_struct *napi, int budget);
static int gfar_poll_sq(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void gfar_netpoll(struct net_device *dev);
#endif
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi);
void gfar_halt(struct net_device *dev);
static void gfar_halt_nodisable(struct net_device *dev);
void gfar_start(struct net_device *dev);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr);
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = buf;

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	eieio();

	bdp->lstatus = lstatus;
}
static int gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	struct rxbd8 *rxbdp;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status |= TXBD_WRAP;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->cur_rx = rx_queue->rx_bd_base;
		rx_queue->skb_currx = 0;
		rxbdp = rx_queue->rx_bd_base;

		for (j = 0; j < rx_queue->rx_ring_size; j++) {
			struct sk_buff *skb = rx_queue->rx_skbuff[j];

			if (skb) {
				gfar_init_rxbdp(rx_queue, rxbdp,
						rxbdp->bufPtr);
			} else {
				skb = gfar_new_skb(ndev);
				if (!skb) {
					netdev_err(ndev, "Can't allocate RX buffers\n");
					return -ENOMEM;
				}
				rx_queue->rx_skbuff[j] = skb;

				gfar_new_rxbdp(rx_queue, rxbdp, skb);
			}

			rxbdp++;
		}
	}

	return 0;
}
static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j, k;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr  += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->dev = ndev;
		addr  += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (k = 0; k < tx_queue->tx_ring_size; k++)
			tx_queue->tx_skbuff[k] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_skbuff =
			kmalloc_array(rx_queue->rx_ring_size,
				      sizeof(*rx_queue->rx_skbuff),
				      GFP_KERNEL);
		if (!rx_queue->rx_skbuff)
			goto cleanup;

		for (j = 0; j < rx_queue->rx_ring_size; j++)
			rx_queue->rx_skbuff[j] = NULL;
	}

	if (gfar_init_bds(ndev))
		goto cleanup;

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}
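/* Worked example (sizes assumed for illustration only): with two TX queues
 * and two RX queues of 256 descriptors each, and 8-byte txbd8/rxbd8
 * descriptors, the single dma_alloc_coherent() region above is carved up as:
 *
 *	offset 0x0000: TX queue 0 BDs (256 * 8 = 2048 bytes)
 *	offset 0x0800: TX queue 1 BDs (2048 bytes)
 *	offset 0x1000: RX queue 0 BDs (2048 bytes)
 *	offset 0x1800: RX queue 1 BDs (2048 bytes)
 *
 * vaddr and addr advance through the region in step, so each queue's
 * tx_bd_base/rx_bd_base (CPU view) and tx_bd_dma_base/rx_bd_dma_base
 * (device view) always name the same descriptors.
 */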
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}
static void gfar_init_mac(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;
	u32 tctrl = 0;
	u32 attrs = 0;

	/* write the tx/rx base registers */
	gfar_init_tx_rx_base(priv);

	/* Configure the coalescing support */
	gfar_configure_coalescing_all(priv);

	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_RIR0);
	}

	/* Restore PROMISC mode */
	if (ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (ndev->features & NETIF_F_RXCSUM) {
		rctrl |= RCTRL_CHECKSUMMING;
		priv->uses_rxfcb = 1;
	}

	if (priv->extended_hash) {
		rctrl |= RCTRL_EXTHASH;

		gfar_clear_exact_match(ndev);
		rctrl |= RCTRL_EMEN;
	}

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Insert receive time stamps into padding alignment bytes */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(8);
		priv->padding = 8;
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en) {
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
		priv->uses_rxfcb = 1;
	}

	if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
		priv->uses_rxfcb = 1;
	}

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);

	if (ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	gfar_write(&regs->tctrl, tctrl);

	/* Set the extraction length and index */
	attrs = ATTRELI_EL(priv->rx_stash_size) |
		ATTRELI_EI(priv->rx_stash_index);

	gfar_write(&regs->attreli, attrs);

	/* Start with defaults, and add stashing or locking
	 * depending on the appropriate variables
	 */
	attrs = ATTR_INIT_SETTINGS;

	if (priv->bd_stash_en)
		attrs |= ATTR_BDSTASH;

	if (priv->rx_stash_size != 0)
		attrs |= ATTR_BUFSTASH;

	gfar_write(&regs->attr, attrs);

	gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
	gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
	gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	unsigned long rx_packets = 0, rx_bytes = 0, rx_dropped = 0;
	unsigned long tx_packets = 0, tx_bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_packets += priv->rx_queue[i]->stats.rx_packets;
		rx_bytes   += priv->rx_queue[i]->stats.rx_bytes;
		rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_bytes   = rx_bytes;
	dev->stats.rx_dropped = rx_dropped;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_bytes   += priv->tx_queue[i]->stats.tx_bytes;
		tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	dev->stats.tx_bytes   = tx_bytes;
	dev->stats.tx_packets = tx_packets;

	return &dev->stats;
}
static const struct net_device_ops gfar_netdev_ops = {
	.ndo_open = gfar_enet_open,
	.ndo_start_xmit = gfar_start_xmit,
	.ndo_stop = gfar_close,
	.ndo_change_mtu = gfar_change_mtu,
	.ndo_set_features = gfar_set_features,
	.ndo_set_rx_mode = gfar_set_multi,
	.ndo_tx_timeout = gfar_timeout,
	.ndo_do_ioctl = gfar_ioctl,
	.ndo_get_stats = gfar_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = gfar_netpoll,
#endif
};
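/* Illustrative note, not from the original source: the networking core calls
 * these hooks indirectly. For example, dev_open() reaches gfar_enet_open()
 * through ndo_open, and the qdisc layer hands frames to gfar_start_xmit()
 * through ndo_start_xmit. A minimal sketch of that dispatch (error handling
 * omitted, local variable names hypothetical):
 *
 *	const struct net_device_ops *ops = dev->netdev_ops;
 *
 *	err = ops->ndo_open(dev);		// -> gfar_enet_open(dev)
 *	...
 *	rc = ops->ndo_start_xmit(skb, dev);	// -> gfar_start_xmit(skb, dev)
 */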
void lock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_lock(&priv->rx_queue[i]->rxlock);
}

void lock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_lock(&priv->tx_queue[i]->txlock);
}

void unlock_rx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		spin_unlock(&priv->rx_queue[i]->rxlock);
}

void unlock_tx_qs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		spin_unlock(&priv->tx_queue[i]->txlock);
}
static void free_tx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void free_rx_pointers(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_disable(&priv->gfargrp[i].napi);
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++)
		napi_enable(&priv->gfargrp[i].napi);
}
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	u32 *queue_mask;
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (gfar_irq(grp, TX)->irq == NO_IRQ ||
		    gfar_irq(grp, RX)->irq == NO_IRQ ||
		    gfar_irq(grp, ER)->irq == NO_IRQ)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		queue_mask = (u32 *)of_get_property(np, "fsl,rx-bit-map", NULL);
		grp->rx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
		queue_mask = (u32 *)of_get_property(np, "fsl,tx-bit-map", NULL);
		grp->tx_bit_map = queue_mask ?
			*queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}
	priv->num_grps++;

	return 0;
}
616 static int gfar_of_init(struct platform_device
*ofdev
, struct net_device
**pdev
)
620 const void *mac_addr
;
622 struct net_device
*dev
= NULL
;
623 struct gfar_private
*priv
= NULL
;
624 struct device_node
*np
= ofdev
->dev
.of_node
;
625 struct device_node
*child
= NULL
;
627 const u32
*stash_len
;
628 const u32
*stash_idx
;
629 unsigned int num_tx_qs
, num_rx_qs
;
630 u32
*tx_queues
, *rx_queues
;
632 if (!np
|| !of_device_is_available(np
))
635 /* parse the num of tx and rx queues */
636 tx_queues
= (u32
*)of_get_property(np
, "fsl,num_tx_queues", NULL
);
637 num_tx_qs
= tx_queues
? *tx_queues
: 1;
639 if (num_tx_qs
> MAX_TX_QS
) {
640 pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
641 num_tx_qs
, MAX_TX_QS
);
642 pr_err("Cannot do alloc_etherdev, aborting\n");
646 rx_queues
= (u32
*)of_get_property(np
, "fsl,num_rx_queues", NULL
);
647 num_rx_qs
= rx_queues
? *rx_queues
: 1;
649 if (num_rx_qs
> MAX_RX_QS
) {
650 pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
651 num_rx_qs
, MAX_RX_QS
);
652 pr_err("Cannot do alloc_etherdev, aborting\n");
656 *pdev
= alloc_etherdev_mq(sizeof(*priv
), num_tx_qs
);
661 priv
= netdev_priv(dev
);
664 priv
->num_tx_queues
= num_tx_qs
;
665 netif_set_real_num_rx_queues(dev
, num_rx_qs
);
666 priv
->num_rx_queues
= num_rx_qs
;
667 priv
->num_grps
= 0x0;
669 /* Init Rx queue filer rule set linked list */
670 INIT_LIST_HEAD(&priv
->rx_list
.list
);
671 priv
->rx_list
.count
= 0;
672 mutex_init(&priv
->rx_queue_access
);
674 model
= of_get_property(np
, "model", NULL
);
676 for (i
= 0; i
< MAXGROUPS
; i
++)
677 priv
->gfargrp
[i
].regs
= NULL
;
679 /* Parse and initialize group specific information */
680 if (of_device_is_compatible(np
, "fsl,etsec2")) {
681 priv
->mode
= MQ_MG_MODE
;
682 for_each_child_of_node(np
, child
) {
683 err
= gfar_parse_group(child
, priv
, model
);
688 priv
->mode
= SQ_SG_MODE
;
689 err
= gfar_parse_group(np
, priv
, model
);
694 for (i
= 0; i
< priv
->num_tx_queues
; i
++)
695 priv
->tx_queue
[i
] = NULL
;
696 for (i
= 0; i
< priv
->num_rx_queues
; i
++)
697 priv
->rx_queue
[i
] = NULL
;
699 for (i
= 0; i
< priv
->num_tx_queues
; i
++) {
700 priv
->tx_queue
[i
] = kzalloc(sizeof(struct gfar_priv_tx_q
),
702 if (!priv
->tx_queue
[i
]) {
704 goto tx_alloc_failed
;
706 priv
->tx_queue
[i
]->tx_skbuff
= NULL
;
707 priv
->tx_queue
[i
]->qindex
= i
;
708 priv
->tx_queue
[i
]->dev
= dev
;
709 spin_lock_init(&(priv
->tx_queue
[i
]->txlock
));
712 for (i
= 0; i
< priv
->num_rx_queues
; i
++) {
713 priv
->rx_queue
[i
] = kzalloc(sizeof(struct gfar_priv_rx_q
),
715 if (!priv
->rx_queue
[i
]) {
717 goto rx_alloc_failed
;
719 priv
->rx_queue
[i
]->rx_skbuff
= NULL
;
720 priv
->rx_queue
[i
]->qindex
= i
;
721 priv
->rx_queue
[i
]->dev
= dev
;
722 spin_lock_init(&(priv
->rx_queue
[i
]->rxlock
));
726 stash
= of_get_property(np
, "bd-stash", NULL
);
729 priv
->device_flags
|= FSL_GIANFAR_DEV_HAS_BD_STASHING
;
730 priv
->bd_stash_en
= 1;
733 stash_len
= of_get_property(np
, "rx-stash-len", NULL
);
736 priv
->rx_stash_size
= *stash_len
;
738 stash_idx
= of_get_property(np
, "rx-stash-idx", NULL
);
741 priv
->rx_stash_index
= *stash_idx
;
743 if (stash_len
|| stash_idx
)
744 priv
->device_flags
|= FSL_GIANFAR_DEV_HAS_BUF_STASHING
;
746 mac_addr
= of_get_mac_address(np
);
749 memcpy(dev
->dev_addr
, mac_addr
, ETH_ALEN
);
751 if (model
&& !strcasecmp(model
, "TSEC"))
752 priv
->device_flags
= FSL_GIANFAR_DEV_HAS_GIGABIT
|
753 FSL_GIANFAR_DEV_HAS_COALESCE
|
754 FSL_GIANFAR_DEV_HAS_RMON
|
755 FSL_GIANFAR_DEV_HAS_MULTI_INTR
;
757 if (model
&& !strcasecmp(model
, "eTSEC"))
758 priv
->device_flags
= FSL_GIANFAR_DEV_HAS_GIGABIT
|
759 FSL_GIANFAR_DEV_HAS_COALESCE
|
760 FSL_GIANFAR_DEV_HAS_RMON
|
761 FSL_GIANFAR_DEV_HAS_MULTI_INTR
|
762 FSL_GIANFAR_DEV_HAS_PADDING
|
763 FSL_GIANFAR_DEV_HAS_CSUM
|
764 FSL_GIANFAR_DEV_HAS_VLAN
|
765 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
|
766 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH
|
767 FSL_GIANFAR_DEV_HAS_TIMER
;
769 ctype
= of_get_property(np
, "phy-connection-type", NULL
);
771 /* We only care about rgmii-id. The rest are autodetected */
772 if (ctype
&& !strcmp(ctype
, "rgmii-id"))
773 priv
->interface
= PHY_INTERFACE_MODE_RGMII_ID
;
775 priv
->interface
= PHY_INTERFACE_MODE_MII
;
777 if (of_get_property(np
, "fsl,magic-packet", NULL
))
778 priv
->device_flags
|= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
;
780 priv
->phy_node
= of_parse_phandle(np
, "phy-handle", 0);
782 /* Find the TBI PHY. If it's not there, we don't support SGMII */
783 priv
->tbi_node
= of_parse_phandle(np
, "tbi-handle", 0);
788 free_rx_pointers(priv
);
790 free_tx_pointers(priv
);
792 unmap_group_regs(priv
);
797 static int gfar_hwtstamp_set(struct net_device
*netdev
, struct ifreq
*ifr
)
799 struct hwtstamp_config config
;
800 struct gfar_private
*priv
= netdev_priv(netdev
);
802 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
805 /* reserved for future extensions */
809 switch (config
.tx_type
) {
810 case HWTSTAMP_TX_OFF
:
811 priv
->hwts_tx_en
= 0;
814 if (!(priv
->device_flags
& FSL_GIANFAR_DEV_HAS_TIMER
))
816 priv
->hwts_tx_en
= 1;
822 switch (config
.rx_filter
) {
823 case HWTSTAMP_FILTER_NONE
:
824 if (priv
->hwts_rx_en
) {
826 priv
->hwts_rx_en
= 0;
827 startup_gfar(netdev
);
831 if (!(priv
->device_flags
& FSL_GIANFAR_DEV_HAS_TIMER
))
833 if (!priv
->hwts_rx_en
) {
835 priv
->hwts_rx_en
= 1;
836 startup_gfar(netdev
);
838 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
842 return copy_to_user(ifr
->ifr_data
, &config
, sizeof(config
)) ?
846 static int gfar_hwtstamp_get(struct net_device
*netdev
, struct ifreq
*ifr
)
848 struct hwtstamp_config config
;
849 struct gfar_private
*priv
= netdev_priv(netdev
);
852 config
.tx_type
= priv
->hwts_tx_en
? HWTSTAMP_TX_ON
: HWTSTAMP_TX_OFF
;
853 config
.rx_filter
= (priv
->hwts_rx_en
?
854 HWTSTAMP_FILTER_ALL
: HWTSTAMP_FILTER_NONE
);
856 return copy_to_user(ifr
->ifr_data
, &config
, sizeof(config
)) ?
860 static int gfar_ioctl(struct net_device
*dev
, struct ifreq
*rq
, int cmd
)
862 struct gfar_private
*priv
= netdev_priv(dev
);
864 if (!netif_running(dev
))
867 if (cmd
== SIOCSHWTSTAMP
)
868 return gfar_hwtstamp_set(dev
, rq
);
869 if (cmd
== SIOCGHWTSTAMP
)
870 return gfar_hwtstamp_get(dev
, rq
);
875 return phy_mii_ioctl(priv
->phydev
, rq
, cmd
);
878 static unsigned int reverse_bitmap(unsigned int bit_map
, unsigned int max_qs
)
880 unsigned int new_bit_map
= 0x0;
881 int mask
= 0x1 << (max_qs
- 1), i
;
883 for (i
= 0; i
< max_qs
; i
++) {
885 new_bit_map
= new_bit_map
+ (1 << i
);
891 static u32
cluster_entry_per_class(struct gfar_private
*priv
, u32 rqfar
,
894 u32 rqfpr
= FPR_FILER_MASK
;
898 rqfcr
= RQFCR_CLE
| RQFCR_PID_MASK
| RQFCR_CMP_EXACT
;
899 priv
->ftp_rqfpr
[rqfar
] = rqfpr
;
900 priv
->ftp_rqfcr
[rqfar
] = rqfcr
;
901 gfar_write_filer(priv
, rqfar
, rqfcr
, rqfpr
);
904 rqfcr
= RQFCR_CMP_NOMATCH
;
905 priv
->ftp_rqfpr
[rqfar
] = rqfpr
;
906 priv
->ftp_rqfcr
[rqfar
] = rqfcr
;
907 gfar_write_filer(priv
, rqfar
, rqfcr
, rqfpr
);
910 rqfcr
= RQFCR_CMP_EXACT
| RQFCR_PID_PARSE
| RQFCR_CLE
| RQFCR_AND
;
912 priv
->ftp_rqfcr
[rqfar
] = rqfcr
;
913 priv
->ftp_rqfpr
[rqfar
] = rqfpr
;
914 gfar_write_filer(priv
, rqfar
, rqfcr
, rqfpr
);
917 rqfcr
= RQFCR_CMP_EXACT
| RQFCR_PID_MASK
| RQFCR_AND
;
919 priv
->ftp_rqfcr
[rqfar
] = rqfcr
;
920 priv
->ftp_rqfpr
[rqfar
] = rqfpr
;
921 gfar_write_filer(priv
, rqfar
, rqfcr
, rqfpr
);
926 static void gfar_init_filer_table(struct gfar_private
*priv
)
929 u32 rqfar
= MAX_FILER_IDX
;
931 u32 rqfpr
= FPR_FILER_MASK
;
934 rqfcr
= RQFCR_CMP_MATCH
;
935 priv
->ftp_rqfcr
[rqfar
] = rqfcr
;
936 priv
->ftp_rqfpr
[rqfar
] = rqfpr
;
937 gfar_write_filer(priv
, rqfar
, rqfcr
, rqfpr
);
939 rqfar
= cluster_entry_per_class(priv
, rqfar
, RQFPR_IPV6
);
940 rqfar
= cluster_entry_per_class(priv
, rqfar
, RQFPR_IPV6
| RQFPR_UDP
);
941 rqfar
= cluster_entry_per_class(priv
, rqfar
, RQFPR_IPV6
| RQFPR_TCP
);
942 rqfar
= cluster_entry_per_class(priv
, rqfar
, RQFPR_IPV4
);
943 rqfar
= cluster_entry_per_class(priv
, rqfar
, RQFPR_IPV4
| RQFPR_UDP
);
944 rqfar
= cluster_entry_per_class(priv
, rqfar
, RQFPR_IPV4
| RQFPR_TCP
);
	/* cur_filer_idx indicates the first non-masked rule */
947 priv
->cur_filer_idx
= rqfar
;
949 /* Rest are masked rules */
950 rqfcr
= RQFCR_CMP_NOMATCH
;
951 for (i
= 0; i
< rqfar
; i
++) {
952 priv
->ftp_rqfcr
[i
] = rqfcr
;
953 priv
->ftp_rqfpr
[i
] = rqfpr
;
954 gfar_write_filer(priv
, i
, rqfcr
, rqfpr
);
958 static void __gfar_detect_errata_83xx(struct gfar_private
*priv
)
960 unsigned int pvr
= mfspr(SPRN_PVR
);
961 unsigned int svr
= mfspr(SPRN_SVR
);
962 unsigned int mod
= (svr
>> 16) & 0xfff6; /* w/o E suffix */
963 unsigned int rev
= svr
& 0xffff;
965 /* MPC8313 Rev 2.0 and higher; All MPC837x */
966 if ((pvr
== 0x80850010 && mod
== 0x80b0 && rev
>= 0x0020) ||
967 (pvr
== 0x80861010 && (mod
& 0xfff9) == 0x80c0))
968 priv
->errata
|= GFAR_ERRATA_74
;
970 /* MPC8313 and MPC837x all rev */
971 if ((pvr
== 0x80850010 && mod
== 0x80b0) ||
972 (pvr
== 0x80861010 && (mod
& 0xfff9) == 0x80c0))
973 priv
->errata
|= GFAR_ERRATA_76
;
975 /* MPC8313 Rev < 2.0 */
976 if (pvr
== 0x80850010 && mod
== 0x80b0 && rev
< 0x0020)
977 priv
->errata
|= GFAR_ERRATA_12
;
980 static void __gfar_detect_errata_85xx(struct gfar_private
*priv
)
982 unsigned int svr
= mfspr(SPRN_SVR
);
984 if ((SVR_SOC_VER(svr
) == SVR_8548
) && (SVR_REV(svr
) == 0x20))
985 priv
->errata
|= GFAR_ERRATA_12
;
986 if (((SVR_SOC_VER(svr
) == SVR_P2020
) && (SVR_REV(svr
) < 0x20)) ||
987 ((SVR_SOC_VER(svr
) == SVR_P2010
) && (SVR_REV(svr
) < 0x20)))
988 priv
->errata
|= GFAR_ERRATA_76
; /* aka eTSEC 20 */
991 static void gfar_detect_errata(struct gfar_private
*priv
)
993 struct device
*dev
= &priv
->ofdev
->dev
;
995 /* no plans to fix */
996 priv
->errata
|= GFAR_ERRATA_A002
;
998 if (pvr_version_is(PVR_VER_E500V1
) || pvr_version_is(PVR_VER_E500V2
))
999 __gfar_detect_errata_85xx(priv
);
1000 else /* non-mpc85xx parts, i.e. e300 core based */
1001 __gfar_detect_errata_83xx(priv
);
1004 dev_info(dev
, "enabled errata workarounds, flags: 0x%x\n",
1008 /* Set up the ethernet device structure, private data,
1009 * and anything else we need before we start
1011 static int gfar_probe(struct platform_device
*ofdev
)
1014 struct net_device
*dev
= NULL
;
1015 struct gfar_private
*priv
= NULL
;
1016 struct gfar __iomem
*regs
= NULL
;
1017 int err
= 0, i
, grp_idx
= 0;
1018 u32 rstat
= 0, tstat
= 0, rqueue
= 0, tqueue
= 0;
1022 err
= gfar_of_init(ofdev
, &dev
);
1027 priv
= netdev_priv(dev
);
1029 priv
->ofdev
= ofdev
;
1030 priv
->dev
= &ofdev
->dev
;
1031 SET_NETDEV_DEV(dev
, &ofdev
->dev
);
1033 spin_lock_init(&priv
->bflock
);
1034 INIT_WORK(&priv
->reset_task
, gfar_reset_task
);
1036 platform_set_drvdata(ofdev
, priv
);
1037 regs
= priv
->gfargrp
[0].regs
;
1039 gfar_detect_errata(priv
);
1041 /* Stop the DMA engine now, in case it was running before
1042 * (The firmware could have used it, and left it running).
1046 /* Reset MAC layer */
1047 gfar_write(®s
->maccfg1
, MACCFG1_SOFT_RESET
);
1049 /* We need to delay at least 3 TX clocks */
1053 if (!priv
->pause_aneg_en
&& priv
->tx_pause_en
)
1054 tempval
|= MACCFG1_TX_FLOW
;
1055 if (!priv
->pause_aneg_en
&& priv
->rx_pause_en
)
1056 tempval
|= MACCFG1_RX_FLOW
;
1057 /* the soft reset bit is not self-resetting, so we need to
1058 * clear it before resuming normal operation
1060 gfar_write(®s
->maccfg1
, tempval
);
1062 /* Initialize MACCFG2. */
1063 tempval
= MACCFG2_INIT_SETTINGS
;
1064 if (gfar_has_errata(priv
, GFAR_ERRATA_74
))
1065 tempval
|= MACCFG2_HUGEFRAME
| MACCFG2_LENGTHCHECK
;
1066 gfar_write(®s
->maccfg2
, tempval
);
1068 /* Initialize ECNTRL */
1069 gfar_write(®s
->ecntrl
, ECNTRL_INIT_SETTINGS
);
1071 /* Set the dev->base_addr to the gfar reg region */
1072 dev
->base_addr
= (unsigned long) regs
;
1074 /* Fill in the dev structure */
1075 dev
->watchdog_timeo
= TX_TIMEOUT
;
1077 dev
->netdev_ops
= &gfar_netdev_ops
;
1078 dev
->ethtool_ops
= &gfar_ethtool_ops
;
1080 /* Register for napi ...We are registering NAPI for each grp */
1081 if (priv
->mode
== SQ_SG_MODE
)
1082 netif_napi_add(dev
, &priv
->gfargrp
[0].napi
, gfar_poll_sq
,
1085 for (i
= 0; i
< priv
->num_grps
; i
++)
1086 netif_napi_add(dev
, &priv
->gfargrp
[i
].napi
, gfar_poll
,
1089 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_CSUM
) {
1090 dev
->hw_features
= NETIF_F_IP_CSUM
| NETIF_F_SG
|
1092 dev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_SG
|
1093 NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
;
1096 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_VLAN
) {
1097 dev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_TX
|
1098 NETIF_F_HW_VLAN_CTAG_RX
;
1099 dev
->features
|= NETIF_F_HW_VLAN_CTAG_RX
;
1102 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_EXTENDED_HASH
) {
1103 priv
->extended_hash
= 1;
1104 priv
->hash_width
= 9;
1106 priv
->hash_regs
[0] = ®s
->igaddr0
;
1107 priv
->hash_regs
[1] = ®s
->igaddr1
;
1108 priv
->hash_regs
[2] = ®s
->igaddr2
;
1109 priv
->hash_regs
[3] = ®s
->igaddr3
;
1110 priv
->hash_regs
[4] = ®s
->igaddr4
;
1111 priv
->hash_regs
[5] = ®s
->igaddr5
;
1112 priv
->hash_regs
[6] = ®s
->igaddr6
;
1113 priv
->hash_regs
[7] = ®s
->igaddr7
;
1114 priv
->hash_regs
[8] = ®s
->gaddr0
;
1115 priv
->hash_regs
[9] = ®s
->gaddr1
;
1116 priv
->hash_regs
[10] = ®s
->gaddr2
;
1117 priv
->hash_regs
[11] = ®s
->gaddr3
;
1118 priv
->hash_regs
[12] = ®s
->gaddr4
;
1119 priv
->hash_regs
[13] = ®s
->gaddr5
;
1120 priv
->hash_regs
[14] = ®s
->gaddr6
;
1121 priv
->hash_regs
[15] = ®s
->gaddr7
;
1124 priv
->extended_hash
= 0;
1125 priv
->hash_width
= 8;
1127 priv
->hash_regs
[0] = ®s
->gaddr0
;
1128 priv
->hash_regs
[1] = ®s
->gaddr1
;
1129 priv
->hash_regs
[2] = ®s
->gaddr2
;
1130 priv
->hash_regs
[3] = ®s
->gaddr3
;
1131 priv
->hash_regs
[4] = ®s
->gaddr4
;
1132 priv
->hash_regs
[5] = ®s
->gaddr5
;
1133 priv
->hash_regs
[6] = ®s
->gaddr6
;
1134 priv
->hash_regs
[7] = ®s
->gaddr7
;
1137 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_PADDING
)
1138 priv
->padding
= DEFAULT_PADDING
;
1142 if (dev
->features
& NETIF_F_IP_CSUM
||
1143 priv
->device_flags
& FSL_GIANFAR_DEV_HAS_TIMER
)
1144 dev
->needed_headroom
= GMAC_FCB_LEN
;
1146 /* Program the isrg regs only if number of grps > 1 */
1147 if (priv
->num_grps
> 1) {
1148 baddr
= ®s
->isrg0
;
1149 for (i
= 0; i
< priv
->num_grps
; i
++) {
1150 isrg
|= (priv
->gfargrp
[i
].rx_bit_map
<< ISRG_SHIFT_RX
);
1151 isrg
|= (priv
->gfargrp
[i
].tx_bit_map
<< ISRG_SHIFT_TX
);
1152 gfar_write(baddr
, isrg
);
	/* Need to reverse the bit maps as bit_map's MSB is q0
	 * but for_each_set_bit parses from right to left, which
	 * basically reverses the queue numbers
	 */
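	/* Worked example (illustrative only): with MAX_TX_QS == 8, a device
	 * tree bit map of 0x80 means "queue 0" because q0 is the MSB. After
	 * reverse_bitmap(0x80, 8) == 0x01, bit 0 now stands for queue 0, so
	 * for_each_set_bit() visits the queues in the intended order.
	 */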
1162 for (i
= 0; i
< priv
->num_grps
; i
++) {
1163 priv
->gfargrp
[i
].tx_bit_map
=
1164 reverse_bitmap(priv
->gfargrp
[i
].tx_bit_map
, MAX_TX_QS
);
1165 priv
->gfargrp
[i
].rx_bit_map
=
1166 reverse_bitmap(priv
->gfargrp
[i
].rx_bit_map
, MAX_RX_QS
);
1169 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
1170 * also assign queues to groups
1172 for (grp_idx
= 0; grp_idx
< priv
->num_grps
; grp_idx
++) {
1173 priv
->gfargrp
[grp_idx
].num_rx_queues
= 0x0;
1175 for_each_set_bit(i
, &priv
->gfargrp
[grp_idx
].rx_bit_map
,
1176 priv
->num_rx_queues
) {
1177 priv
->gfargrp
[grp_idx
].num_rx_queues
++;
1178 priv
->rx_queue
[i
]->grp
= &priv
->gfargrp
[grp_idx
];
1179 rstat
= rstat
| (RSTAT_CLEAR_RHALT
>> i
);
1180 rqueue
= rqueue
| ((RQUEUE_EN0
| RQUEUE_EX0
) >> i
);
1182 priv
->gfargrp
[grp_idx
].num_tx_queues
= 0x0;
1184 for_each_set_bit(i
, &priv
->gfargrp
[grp_idx
].tx_bit_map
,
1185 priv
->num_tx_queues
) {
1186 priv
->gfargrp
[grp_idx
].num_tx_queues
++;
1187 priv
->tx_queue
[i
]->grp
= &priv
->gfargrp
[grp_idx
];
1188 tstat
= tstat
| (TSTAT_CLEAR_THALT
>> i
);
1189 tqueue
= tqueue
| (TQUEUE_EN0
>> i
);
1191 priv
->gfargrp
[grp_idx
].rstat
= rstat
;
1192 priv
->gfargrp
[grp_idx
].tstat
= tstat
;
1196 gfar_write(®s
->rqueue
, rqueue
);
1197 gfar_write(®s
->tqueue
, tqueue
);
1199 priv
->rx_buffer_size
= DEFAULT_RX_BUFFER_SIZE
;
1201 /* Initializing some of the rx/tx queue level parameters */
1202 for (i
= 0; i
< priv
->num_tx_queues
; i
++) {
1203 priv
->tx_queue
[i
]->tx_ring_size
= DEFAULT_TX_RING_SIZE
;
1204 priv
->tx_queue
[i
]->num_txbdfree
= DEFAULT_TX_RING_SIZE
;
1205 priv
->tx_queue
[i
]->txcoalescing
= DEFAULT_TX_COALESCE
;
1206 priv
->tx_queue
[i
]->txic
= DEFAULT_TXIC
;
1209 for (i
= 0; i
< priv
->num_rx_queues
; i
++) {
1210 priv
->rx_queue
[i
]->rx_ring_size
= DEFAULT_RX_RING_SIZE
;
1211 priv
->rx_queue
[i
]->rxcoalescing
= DEFAULT_RX_COALESCE
;
1212 priv
->rx_queue
[i
]->rxic
= DEFAULT_RXIC
;
1215 /* always enable rx filer */
1216 priv
->rx_filer_enable
= 1;
1217 /* Enable most messages by default */
1218 priv
->msg_enable
= (NETIF_MSG_IFUP
<< 1 ) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
1220 if (priv
->num_tx_queues
== 1)
1221 priv
->prio_sched_en
= 1;
1223 /* Carrier starts down, phylib will bring it up */
1224 netif_carrier_off(dev
);
1226 err
= register_netdev(dev
);
1229 pr_err("%s: Cannot register net device, aborting\n", dev
->name
);
1233 device_init_wakeup(&dev
->dev
,
1234 priv
->device_flags
&
1235 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
);
1237 /* fill out IRQ number and name fields */
1238 for (i
= 0; i
< priv
->num_grps
; i
++) {
1239 struct gfar_priv_grp
*grp
= &priv
->gfargrp
[i
];
1240 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MULTI_INTR
) {
1241 sprintf(gfar_irq(grp
, TX
)->name
, "%s%s%c%s",
1242 dev
->name
, "_g", '0' + i
, "_tx");
1243 sprintf(gfar_irq(grp
, RX
)->name
, "%s%s%c%s",
1244 dev
->name
, "_g", '0' + i
, "_rx");
1245 sprintf(gfar_irq(grp
, ER
)->name
, "%s%s%c%s",
1246 dev
->name
, "_g", '0' + i
, "_er");
1248 strcpy(gfar_irq(grp
, TX
)->name
, dev
->name
);
1251 /* Initialize the filer table */
1252 gfar_init_filer_table(priv
);
1254 /* Create all the sysfs files */
1255 gfar_init_sysfs(dev
);
1257 /* Print out the device info */
1258 netdev_info(dev
, "mac: %pM\n", dev
->dev_addr
);
1260 /* Even more device info helps when determining which kernel
1261 * provided which set of benchmarks.
1263 netdev_info(dev
, "Running with NAPI enabled\n");
1264 for (i
= 0; i
< priv
->num_rx_queues
; i
++)
1265 netdev_info(dev
, "RX BD ring size for Q[%d]: %d\n",
1266 i
, priv
->rx_queue
[i
]->rx_ring_size
);
1267 for (i
= 0; i
< priv
->num_tx_queues
; i
++)
1268 netdev_info(dev
, "TX BD ring size for Q[%d]: %d\n",
1269 i
, priv
->tx_queue
[i
]->tx_ring_size
);
1274 unmap_group_regs(priv
);
1275 free_tx_pointers(priv
);
1276 free_rx_pointers(priv
);
1278 of_node_put(priv
->phy_node
);
1280 of_node_put(priv
->tbi_node
);
1281 free_gfar_dev(priv
);
1285 static int gfar_remove(struct platform_device
*ofdev
)
1287 struct gfar_private
*priv
= platform_get_drvdata(ofdev
);
1290 of_node_put(priv
->phy_node
);
1292 of_node_put(priv
->tbi_node
);
1294 unregister_netdev(priv
->ndev
);
1295 unmap_group_regs(priv
);
1296 free_gfar_dev(priv
);
1303 static int gfar_suspend(struct device
*dev
)
1305 struct gfar_private
*priv
= dev_get_drvdata(dev
);
1306 struct net_device
*ndev
= priv
->ndev
;
1307 struct gfar __iomem
*regs
= priv
->gfargrp
[0].regs
;
1308 unsigned long flags
;
1311 int magic_packet
= priv
->wol_en
&&
1312 (priv
->device_flags
&
1313 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
);
1315 netif_device_detach(ndev
);
1317 if (netif_running(ndev
)) {
1319 local_irq_save(flags
);
1323 gfar_halt_nodisable(ndev
);
1325 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
1326 tempval
= gfar_read(®s
->maccfg1
);
1328 tempval
&= ~MACCFG1_TX_EN
;
1331 tempval
&= ~MACCFG1_RX_EN
;
1333 gfar_write(®s
->maccfg1
, tempval
);
1337 local_irq_restore(flags
);
1342 /* Enable interrupt on Magic Packet */
1343 gfar_write(®s
->imask
, IMASK_MAG
);
1345 /* Enable Magic Packet mode */
1346 tempval
= gfar_read(®s
->maccfg2
);
1347 tempval
|= MACCFG2_MPEN
;
1348 gfar_write(®s
->maccfg2
, tempval
);
1350 phy_stop(priv
->phydev
);
1357 static int gfar_resume(struct device
*dev
)
1359 struct gfar_private
*priv
= dev_get_drvdata(dev
);
1360 struct net_device
*ndev
= priv
->ndev
;
1361 struct gfar __iomem
*regs
= priv
->gfargrp
[0].regs
;
1362 unsigned long flags
;
1364 int magic_packet
= priv
->wol_en
&&
1365 (priv
->device_flags
&
1366 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET
);
1368 if (!netif_running(ndev
)) {
1369 netif_device_attach(ndev
);
1373 if (!magic_packet
&& priv
->phydev
)
1374 phy_start(priv
->phydev
);
1376 /* Disable Magic Packet mode, in case something
1379 local_irq_save(flags
);
1383 tempval
= gfar_read(®s
->maccfg2
);
1384 tempval
&= ~MACCFG2_MPEN
;
1385 gfar_write(®s
->maccfg2
, tempval
);
1391 local_irq_restore(flags
);
1393 netif_device_attach(ndev
);
1400 static int gfar_restore(struct device
*dev
)
1402 struct gfar_private
*priv
= dev_get_drvdata(dev
);
1403 struct net_device
*ndev
= priv
->ndev
;
1405 if (!netif_running(ndev
)) {
1406 netif_device_attach(ndev
);
1411 if (gfar_init_bds(ndev
)) {
1412 free_skb_resources(priv
);
1416 init_registers(ndev
);
1417 gfar_set_mac_address(ndev
);
1418 gfar_init_mac(ndev
);
1423 priv
->oldduplex
= -1;
1426 phy_start(priv
->phydev
);
1428 netif_device_attach(ndev
);
1434 static struct dev_pm_ops gfar_pm_ops
= {
1435 .suspend
= gfar_suspend
,
1436 .resume
= gfar_resume
,
1437 .freeze
= gfar_suspend
,
1438 .thaw
= gfar_resume
,
1439 .restore
= gfar_restore
,
1442 #define GFAR_PM_OPS (&gfar_pm_ops)
1446 #define GFAR_PM_OPS NULL
1450 /* Reads the controller's registers to determine what interface
1451 * connects it to the PHY.
1453 static phy_interface_t
gfar_get_interface(struct net_device
*dev
)
1455 struct gfar_private
*priv
= netdev_priv(dev
);
1456 struct gfar __iomem
*regs
= priv
->gfargrp
[0].regs
;
1459 ecntrl
= gfar_read(®s
->ecntrl
);
1461 if (ecntrl
& ECNTRL_SGMII_MODE
)
1462 return PHY_INTERFACE_MODE_SGMII
;
1464 if (ecntrl
& ECNTRL_TBI_MODE
) {
1465 if (ecntrl
& ECNTRL_REDUCED_MODE
)
1466 return PHY_INTERFACE_MODE_RTBI
;
1468 return PHY_INTERFACE_MODE_TBI
;
1471 if (ecntrl
& ECNTRL_REDUCED_MODE
) {
1472 if (ecntrl
& ECNTRL_REDUCED_MII_MODE
) {
1473 return PHY_INTERFACE_MODE_RMII
;
1476 phy_interface_t interface
= priv
->interface
;
1478 /* This isn't autodetected right now, so it must
1479 * be set by the device tree or platform code.
1481 if (interface
== PHY_INTERFACE_MODE_RGMII_ID
)
1482 return PHY_INTERFACE_MODE_RGMII_ID
;
1484 return PHY_INTERFACE_MODE_RGMII
;
1488 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_GIGABIT
)
1489 return PHY_INTERFACE_MODE_GMII
;
1491 return PHY_INTERFACE_MODE_MII
;
1495 /* Initializes driver's PHY state, and attaches to the PHY.
1496 * Returns 0 on success.
1498 static int init_phy(struct net_device
*dev
)
1500 struct gfar_private
*priv
= netdev_priv(dev
);
1501 uint gigabit_support
=
1502 priv
->device_flags
& FSL_GIANFAR_DEV_HAS_GIGABIT
?
1503 GFAR_SUPPORTED_GBIT
: 0;
1504 phy_interface_t interface
;
1508 priv
->oldduplex
= -1;
1510 interface
= gfar_get_interface(dev
);
1512 priv
->phydev
= of_phy_connect(dev
, priv
->phy_node
, &adjust_link
, 0,
1515 priv
->phydev
= of_phy_connect_fixed_link(dev
, &adjust_link
,
1517 if (!priv
->phydev
) {
1518 dev_err(&dev
->dev
, "could not attach to PHY\n");
1522 if (interface
== PHY_INTERFACE_MODE_SGMII
)
1523 gfar_configure_serdes(dev
);
1525 /* Remove any features not supported by the controller */
1526 priv
->phydev
->supported
&= (GFAR_SUPPORTED
| gigabit_support
);
1527 priv
->phydev
->advertising
= priv
->phydev
->supported
;
1532 /* Initialize TBI PHY interface for communicating with the
1533 * SERDES lynx PHY on the chip. We communicate with this PHY
1534 * through the MDIO bus on each controller, treating it as a
1535 * "normal" PHY at the address found in the TBIPA register. We assume
1536 * that the TBIPA register is valid. Either the MDIO bus code will set
1537 * it to a value that doesn't conflict with other PHYs on the bus, or the
1538 * value doesn't matter, as there are no other PHYs on the bus.
1540 static void gfar_configure_serdes(struct net_device
*dev
)
1542 struct gfar_private
*priv
= netdev_priv(dev
);
1543 struct phy_device
*tbiphy
;
1545 if (!priv
->tbi_node
) {
1546 dev_warn(&dev
->dev
, "error: SGMII mode requires that the "
1547 "device tree specify a tbi-handle\n");
1551 tbiphy
= of_phy_find_device(priv
->tbi_node
);
1553 dev_err(&dev
->dev
, "error: Could not get TBI device\n");
1557 /* If the link is already up, we must already be ok, and don't need to
1558 * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
1559 * everything for us? Resetting it takes the link down and requires
1560 * several seconds for it to come back.
1562 if (phy_read(tbiphy
, MII_BMSR
) & BMSR_LSTATUS
)
1565 /* Single clk mode, mii mode off(for serdes communication) */
1566 phy_write(tbiphy
, MII_TBICON
, TBICON_CLK_SELECT
);
1568 phy_write(tbiphy
, MII_ADVERTISE
,
1569 ADVERTISE_1000XFULL
| ADVERTISE_1000XPAUSE
|
1570 ADVERTISE_1000XPSE_ASYM
);
1572 phy_write(tbiphy
, MII_BMCR
,
1573 BMCR_ANENABLE
| BMCR_ANRESTART
| BMCR_FULLDPLX
|
1577 static void init_registers(struct net_device
*dev
)
1579 struct gfar_private
*priv
= netdev_priv(dev
);
1580 struct gfar __iomem
*regs
= NULL
;
1583 for (i
= 0; i
< priv
->num_grps
; i
++) {
1584 regs
= priv
->gfargrp
[i
].regs
;
1586 gfar_write(®s
->ievent
, IEVENT_INIT_CLEAR
);
1588 /* Initialize IMASK */
1589 gfar_write(®s
->imask
, IMASK_INIT_CLEAR
);
1592 regs
= priv
->gfargrp
[0].regs
;
1593 /* Init hash registers to zero */
1594 gfar_write(®s
->igaddr0
, 0);
1595 gfar_write(®s
->igaddr1
, 0);
1596 gfar_write(®s
->igaddr2
, 0);
1597 gfar_write(®s
->igaddr3
, 0);
1598 gfar_write(®s
->igaddr4
, 0);
1599 gfar_write(®s
->igaddr5
, 0);
1600 gfar_write(®s
->igaddr6
, 0);
1601 gfar_write(®s
->igaddr7
, 0);
1603 gfar_write(®s
->gaddr0
, 0);
1604 gfar_write(®s
->gaddr1
, 0);
1605 gfar_write(®s
->gaddr2
, 0);
1606 gfar_write(®s
->gaddr3
, 0);
1607 gfar_write(®s
->gaddr4
, 0);
1608 gfar_write(®s
->gaddr5
, 0);
1609 gfar_write(®s
->gaddr6
, 0);
1610 gfar_write(®s
->gaddr7
, 0);
1612 /* Zero out the rmon mib registers if it has them */
1613 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_RMON
) {
1614 memset_io(&(regs
->rmon
), 0, sizeof (struct rmon_mib
));
1616 /* Mask off the CAM interrupts */
1617 gfar_write(®s
->rmon
.cam1
, 0xffffffff);
1618 gfar_write(®s
->rmon
.cam2
, 0xffffffff);
1621 /* Initialize the max receive buffer length */
1622 gfar_write(®s
->mrblr
, priv
->rx_buffer_size
);
1624 /* Initialize the Minimum Frame Length Register */
1625 gfar_write(®s
->minflr
, MINFLR_INIT_SETTINGS
);
1628 static int __gfar_is_rx_idle(struct gfar_private
*priv
)
	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
1635 if (!gfar_has_errata(priv
, GFAR_ERRATA_A002
))
1638 /* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
1639 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
1640 * and the Rx can be safely reset.
1642 res
= gfar_read((void __iomem
*)priv
->gfargrp
[0].regs
+ 0xd1c);
1644 if ((res
& 0xffff) == (res
>> 16))
1650 /* Halt the receive and transmit queues */
1651 static void gfar_halt_nodisable(struct net_device
*dev
)
1653 struct gfar_private
*priv
= netdev_priv(dev
);
1654 struct gfar __iomem
*regs
= NULL
;
1658 for (i
= 0; i
< priv
->num_grps
; i
++) {
1659 regs
= priv
->gfargrp
[i
].regs
;
1660 /* Mask all interrupts */
1661 gfar_write(®s
->imask
, IMASK_INIT_CLEAR
);
1663 /* Clear all interrupts */
1664 gfar_write(®s
->ievent
, IEVENT_INIT_CLEAR
);
1667 regs
= priv
->gfargrp
[0].regs
;
1668 /* Stop the DMA, and wait for it to stop */
1669 tempval
= gfar_read(®s
->dmactrl
);
1670 if ((tempval
& (DMACTRL_GRS
| DMACTRL_GTS
)) !=
1671 (DMACTRL_GRS
| DMACTRL_GTS
)) {
1674 tempval
|= (DMACTRL_GRS
| DMACTRL_GTS
);
1675 gfar_write(®s
->dmactrl
, tempval
);
1678 ret
= spin_event_timeout(((gfar_read(®s
->ievent
) &
1679 (IEVENT_GRSC
| IEVENT_GTSC
)) ==
1680 (IEVENT_GRSC
| IEVENT_GTSC
)), 1000000, 0);
1681 if (!ret
&& !(gfar_read(®s
->ievent
) & IEVENT_GRSC
))
1682 ret
= __gfar_is_rx_idle(priv
);
1687 /* Halt the receive and transmit queues */
1688 void gfar_halt(struct net_device
*dev
)
1690 struct gfar_private
*priv
= netdev_priv(dev
);
1691 struct gfar __iomem
*regs
= priv
->gfargrp
[0].regs
;
1694 gfar_halt_nodisable(dev
);
1696 /* Disable Rx and Tx */
1697 tempval
= gfar_read(®s
->maccfg1
);
1698 tempval
&= ~(MACCFG1_RX_EN
| MACCFG1_TX_EN
);
1699 gfar_write(®s
->maccfg1
, tempval
);
1702 static void free_grp_irqs(struct gfar_priv_grp
*grp
)
1704 free_irq(gfar_irq(grp
, TX
)->irq
, grp
);
1705 free_irq(gfar_irq(grp
, RX
)->irq
, grp
);
1706 free_irq(gfar_irq(grp
, ER
)->irq
, grp
);
1709 void stop_gfar(struct net_device
*dev
)
1711 struct gfar_private
*priv
= netdev_priv(dev
);
1712 unsigned long flags
;
1715 phy_stop(priv
->phydev
);
1719 local_irq_save(flags
);
1727 local_irq_restore(flags
);
1730 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MULTI_INTR
) {
1731 for (i
= 0; i
< priv
->num_grps
; i
++)
1732 free_grp_irqs(&priv
->gfargrp
[i
]);
1734 for (i
= 0; i
< priv
->num_grps
; i
++)
1735 free_irq(gfar_irq(&priv
->gfargrp
[i
], TX
)->irq
,
1739 free_skb_resources(priv
);
1742 static void free_skb_tx_queue(struct gfar_priv_tx_q
*tx_queue
)
1744 struct txbd8
*txbdp
;
1745 struct gfar_private
*priv
= netdev_priv(tx_queue
->dev
);
1748 txbdp
= tx_queue
->tx_bd_base
;
1750 for (i
= 0; i
< tx_queue
->tx_ring_size
; i
++) {
1751 if (!tx_queue
->tx_skbuff
[i
])
1754 dma_unmap_single(priv
->dev
, txbdp
->bufPtr
,
1755 txbdp
->length
, DMA_TO_DEVICE
);
1757 for (j
= 0; j
< skb_shinfo(tx_queue
->tx_skbuff
[i
])->nr_frags
;
1760 dma_unmap_page(priv
->dev
, txbdp
->bufPtr
,
1761 txbdp
->length
, DMA_TO_DEVICE
);
1764 dev_kfree_skb_any(tx_queue
->tx_skbuff
[i
]);
1765 tx_queue
->tx_skbuff
[i
] = NULL
;
1767 kfree(tx_queue
->tx_skbuff
);
1768 tx_queue
->tx_skbuff
= NULL
;
1771 static void free_skb_rx_queue(struct gfar_priv_rx_q
*rx_queue
)
1773 struct rxbd8
*rxbdp
;
1774 struct gfar_private
*priv
= netdev_priv(rx_queue
->dev
);
1777 rxbdp
= rx_queue
->rx_bd_base
;
1779 for (i
= 0; i
< rx_queue
->rx_ring_size
; i
++) {
1780 if (rx_queue
->rx_skbuff
[i
]) {
1781 dma_unmap_single(priv
->dev
, rxbdp
->bufPtr
,
1782 priv
->rx_buffer_size
,
1784 dev_kfree_skb_any(rx_queue
->rx_skbuff
[i
]);
1785 rx_queue
->rx_skbuff
[i
] = NULL
;
1791 kfree(rx_queue
->rx_skbuff
);
1792 rx_queue
->rx_skbuff
= NULL
;
1795 /* If there are any tx skbs or rx skbs still around, free them.
1796 * Then free tx_skbuff and rx_skbuff
1798 static void free_skb_resources(struct gfar_private
*priv
)
1800 struct gfar_priv_tx_q
*tx_queue
= NULL
;
1801 struct gfar_priv_rx_q
*rx_queue
= NULL
;
1804 /* Go through all the buffer descriptors and free their data buffers */
1805 for (i
= 0; i
< priv
->num_tx_queues
; i
++) {
1806 struct netdev_queue
*txq
;
1808 tx_queue
= priv
->tx_queue
[i
];
1809 txq
= netdev_get_tx_queue(tx_queue
->dev
, tx_queue
->qindex
);
1810 if (tx_queue
->tx_skbuff
)
1811 free_skb_tx_queue(tx_queue
);
1812 netdev_tx_reset_queue(txq
);
1815 for (i
= 0; i
< priv
->num_rx_queues
; i
++) {
1816 rx_queue
= priv
->rx_queue
[i
];
1817 if (rx_queue
->rx_skbuff
)
1818 free_skb_rx_queue(rx_queue
);
1821 dma_free_coherent(priv
->dev
,
1822 sizeof(struct txbd8
) * priv
->total_tx_ring_size
+
1823 sizeof(struct rxbd8
) * priv
->total_rx_ring_size
,
1824 priv
->tx_queue
[0]->tx_bd_base
,
1825 priv
->tx_queue
[0]->tx_bd_dma_base
);
1828 void gfar_start(struct net_device
*dev
)
1830 struct gfar_private
*priv
= netdev_priv(dev
);
1831 struct gfar __iomem
*regs
= priv
->gfargrp
[0].regs
;
1835 /* Enable Rx and Tx in MACCFG1 */
1836 tempval
= gfar_read(®s
->maccfg1
);
1837 tempval
|= (MACCFG1_RX_EN
| MACCFG1_TX_EN
);
1838 gfar_write(®s
->maccfg1
, tempval
);
1840 /* Initialize DMACTRL to have WWR and WOP */
1841 tempval
= gfar_read(®s
->dmactrl
);
1842 tempval
|= DMACTRL_INIT_SETTINGS
;
1843 gfar_write(®s
->dmactrl
, tempval
);
1845 /* Make sure we aren't stopped */
1846 tempval
= gfar_read(®s
->dmactrl
);
1847 tempval
&= ~(DMACTRL_GRS
| DMACTRL_GTS
);
1848 gfar_write(®s
->dmactrl
, tempval
);
1850 for (i
= 0; i
< priv
->num_grps
; i
++) {
1851 regs
= priv
->gfargrp
[i
].regs
;
1852 /* Clear THLT/RHLT, so that the DMA starts polling now */
1853 gfar_write(®s
->tstat
, priv
->gfargrp
[i
].tstat
);
1854 gfar_write(®s
->rstat
, priv
->gfargrp
[i
].rstat
);
1855 /* Unmask the interrupts we look for */
1856 gfar_write(®s
->imask
, IMASK_DEFAULT
);
1859 dev
->trans_start
= jiffies
; /* prevent tx timeout */
1862 static void gfar_configure_coalescing(struct gfar_private
*priv
,
1863 unsigned long tx_mask
, unsigned long rx_mask
)
1865 struct gfar __iomem
*regs
= priv
->gfargrp
[0].regs
;
1868 if (priv
->mode
== MQ_MG_MODE
) {
1871 baddr
= ®s
->txic0
;
1872 for_each_set_bit(i
, &tx_mask
, priv
->num_tx_queues
) {
1873 gfar_write(baddr
+ i
, 0);
1874 if (likely(priv
->tx_queue
[i
]->txcoalescing
))
1875 gfar_write(baddr
+ i
, priv
->tx_queue
[i
]->txic
);
1878 baddr
= ®s
->rxic0
;
1879 for_each_set_bit(i
, &rx_mask
, priv
->num_rx_queues
) {
1880 gfar_write(baddr
+ i
, 0);
1881 if (likely(priv
->rx_queue
[i
]->rxcoalescing
))
1882 gfar_write(baddr
+ i
, priv
->rx_queue
[i
]->rxic
);
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only a single reg to program
		 */
1888 gfar_write(®s
->txic
, 0);
1889 if (likely(priv
->tx_queue
[0]->txcoalescing
))
1890 gfar_write(®s
->txic
, priv
->tx_queue
[0]->txic
);
1892 gfar_write(®s
->rxic
, 0);
1893 if (unlikely(priv
->rx_queue
[0]->rxcoalescing
))
1894 gfar_write(®s
->rxic
, priv
->rx_queue
[0]->rxic
);
1898 void gfar_configure_coalescing_all(struct gfar_private
*priv
)
1900 gfar_configure_coalescing(priv
, 0xFF, 0xFF);
1903 static int register_grp_irqs(struct gfar_priv_grp
*grp
)
1905 struct gfar_private
*priv
= grp
->priv
;
1906 struct net_device
*dev
= priv
->ndev
;
1909 /* If the device has multiple interrupts, register for
1910 * them. Otherwise, only register for the one
1912 if (priv
->device_flags
& FSL_GIANFAR_DEV_HAS_MULTI_INTR
) {
1913 /* Install our interrupt handlers for Error,
1914 * Transmit, and Receive
1916 err
= request_irq(gfar_irq(grp
, ER
)->irq
, gfar_error
, 0,
1917 gfar_irq(grp
, ER
)->name
, grp
);
1919 netif_err(priv
, intr
, dev
, "Can't get IRQ %d\n",
1920 gfar_irq(grp
, ER
)->irq
);
1924 err
= request_irq(gfar_irq(grp
, TX
)->irq
, gfar_transmit
, 0,
1925 gfar_irq(grp
, TX
)->name
, grp
);
1927 netif_err(priv
, intr
, dev
, "Can't get IRQ %d\n",
1928 gfar_irq(grp
, TX
)->irq
);
1931 err
= request_irq(gfar_irq(grp
, RX
)->irq
, gfar_receive
, 0,
1932 gfar_irq(grp
, RX
)->name
, grp
);
1934 netif_err(priv
, intr
, dev
, "Can't get IRQ %d\n",
1935 gfar_irq(grp
, RX
)->irq
);
1939 err
= request_irq(gfar_irq(grp
, TX
)->irq
, gfar_interrupt
, 0,
1940 gfar_irq(grp
, TX
)->name
, grp
);
1942 netif_err(priv
, intr
, dev
, "Can't get IRQ %d\n",
1943 gfar_irq(grp
, TX
)->irq
);
1951 free_irq(gfar_irq(grp
, TX
)->irq
, grp
);
1953 free_irq(gfar_irq(grp
, ER
)->irq
, grp
);
1959 /* Bring the controller up and running */
1960 int startup_gfar(struct net_device
*ndev
)
1962 struct gfar_private
*priv
= netdev_priv(ndev
);
1963 struct gfar __iomem
*regs
= NULL
;
1966 for (i
= 0; i
< priv
->num_grps
; i
++) {
1967 regs
= priv
->gfargrp
[i
].regs
;
1968 gfar_write(®s
->imask
, IMASK_INIT_CLEAR
);
1971 regs
= priv
->gfargrp
[0].regs
;
1972 err
= gfar_alloc_skb_resources(ndev
);
1976 gfar_init_mac(ndev
);
1978 for (i
= 0; i
< priv
->num_grps
; i
++) {
1979 err
= register_grp_irqs(&priv
->gfargrp
[i
]);
1981 for (j
= 0; j
< i
; j
++)
1982 free_grp_irqs(&priv
->gfargrp
[j
]);
1987 /* Start the controller */
1990 phy_start(priv
->phydev
);
1992 gfar_configure_coalescing_all(priv
);
1997 free_skb_resources(priv
);
2001 /* Called when something needs to use the ethernet device
2002 * Returns 0 for success.
2004 static int gfar_enet_open(struct net_device
*dev
)
2006 struct gfar_private
*priv
= netdev_priv(dev
);
2011 /* Initialize a bunch of registers */
2012 init_registers(dev
);
2014 gfar_set_mac_address(dev
);
2016 err
= init_phy(dev
);
2023 err
= startup_gfar(dev
);
2029 netif_tx_start_all_queues(dev
);
2031 device_set_wakeup_enable(&dev
->dev
, priv
->wol_en
);
2036 static inline struct txfcb
*gfar_add_fcb(struct sk_buff
*skb
)
2038 struct txfcb
*fcb
= (struct txfcb
*)skb_push(skb
, GMAC_FCB_LEN
);
2040 memset(fcb
, 0, GMAC_FCB_LEN
);
2045 static inline void gfar_tx_checksum(struct sk_buff
*skb
, struct txfcb
*fcb
,
	/* If we're here, it's an IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
2052 u8 flags
= TXFCB_DEFAULT
;
2054 /* Tell the controller what the protocol is
2055 * And provide the already calculated phcs
2057 if (ip_hdr(skb
)->protocol
== IPPROTO_UDP
) {
2059 fcb
->phcs
= udp_hdr(skb
)->check
;
2061 fcb
->phcs
= tcp_hdr(skb
)->check
;
2063 /* l3os is the distance between the start of the
2064 * frame (skb->data) and the start of the IP hdr.
2065 * l4os is the distance between the start of the
2066 * l3 hdr and the l4 hdr
2068 fcb
->l3os
= (u16
)(skb_network_offset(skb
) - fcb_length
);
2069 fcb
->l4os
= skb_network_header_len(skb
);
2074 void inline gfar_tx_vlan(struct sk_buff
*skb
, struct txfcb
*fcb
)
2076 fcb
->flags
|= TXFCB_VLN
;
2077 fcb
->vlctl
= vlan_tx_tag_get(skb
);
2080 static inline struct txbd8
*skip_txbd(struct txbd8
*bdp
, int stride
,
2081 struct txbd8
*base
, int ring_size
)
2083 struct txbd8
*new_bd
= bdp
+ stride
;
2085 return (new_bd
>= (base
+ ring_size
)) ? (new_bd
- ring_size
) : new_bd
;
2088 static inline struct txbd8
*next_txbd(struct txbd8
*bdp
, struct txbd8
*base
,
2091 return skip_txbd(bdp
, 1, base
, ring_size
);
2094 /* eTSEC12: csum generation not supported for some fcb offsets */
2095 static inline bool gfar_csum_errata_12(struct gfar_private
*priv
,
2096 unsigned long fcb_addr
)
2098 return (gfar_has_errata(priv
, GFAR_ERRATA_12
) &&
2099 (fcb_addr
% 0x20) > 0x18);
2102 /* eTSEC76: csum generation for frames larger than 2500 may
2103 * cause excess delays before start of transmission
2105 static inline bool gfar_csum_errata_76(struct gfar_private
*priv
,
2108 return (gfar_has_errata(priv
, GFAR_ERRATA_76
) &&
/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned long flags;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = vlan_tx_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
		struct sk_buff *skb_new;

		skb_new = skb_realloc_headroom(skb, fcb_len);
		if (!skb_new) {
			dev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		if (skb->sk)
			skb_set_owner_w(skb_new, skb->sk);
		consume_skb(skb);
		skb = skb_new;
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

	/* Update transmit stats */
	bytes_sent = skb->len;
	tx_queue->stats.tx_bytes += bytes_sent;
	/* keep Tx bytes on wire for BQL accounting */
	GFAR_CB(skb)->bytes_sent = bytes_sent;
	tx_queue->stats.tx_packets++;

	txbdp = txbdp_start = tx_queue->cur_tx;
	lstatus = txbdp->lstatus;

	/* Time stamp insertion requires one additional TxBD */
	if (unlikely(do_tstamp))
		txbdp_tstamp = txbdp = next_txbd(txbdp, base,
						 tx_queue->tx_ring_size);

	if (nr_frags == 0) {
		if (unlikely(do_tstamp))
			txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
							  TXBD_INTERRUPT);
		else
			lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
	} else {
		/* Place the fragment addresses and lengths into the TxBDs */
		for (i = 0; i < nr_frags; i++) {
			unsigned int frag_len;
			/* Point at the next BD, wrapping as needed */
			txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);

			frag_len = skb_shinfo(skb)->frags[i].size;

			lstatus = txbdp->lstatus | frag_len |
				  BD_LFLAG(TXBD_READY);

			/* Handle the last BD specially */
			if (i == nr_frags - 1)
				lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);

			bufaddr = skb_frag_dma_map(priv->dev,
						   &skb_shinfo(skb)->frags[i],
						   0,
						   frag_len,
						   DMA_TO_DEVICE);

			/* set the TxBD length and buffer pointer */
			txbdp->bufPtr = bufaddr;
			txbdp->lstatus = lstatus;
		}

		lstatus = txbdp_start->lstatus;
	}

	/* Add TxPAL between FCB and frame if required */
	if (unlikely(do_tstamp)) {
		skb_push(skb, GMAC_TXPAL_LEN);
		memset(skb->data, 0, GMAC_TXPAL_LEN);
	}

	/* Add TxFCB if required */
	if (fcb_len) {
		fcb = gfar_add_fcb(skb);
		lstatus |= BD_LFLAG(TXBD_TOE);
	}

	/* Set up checksumming */
	if (do_csum) {
		gfar_tx_checksum(skb, fcb, fcb_len);

		if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
		    unlikely(gfar_csum_errata_76(priv, skb->len))) {
			__skb_pull(skb, GMAC_FCB_LEN);
			skb_checksum_help(skb);
			if (do_vlan || do_tstamp) {
				/* put back a new fcb for vlan/tstamp TOE */
				fcb = gfar_add_fcb(skb);
			} else {
				/* Tx TOE not used */
				lstatus &= ~(BD_LFLAG(TXBD_TOE));
				fcb = NULL;
			}
		}
	}

	if (do_vlan)
		gfar_tx_vlan(skb, fcb);

	/* Setup tx hardware time stamping if requested */
	if (unlikely(do_tstamp)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		fcb->ptp = 1;
	}

	txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	/* If time stamping is requested one additional TxBD must be set up. The
	 * first TxBD points to the FCB and must have a data length of
	 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
	 * the full frame length.
	 */
	if (unlikely(do_tstamp)) {
		txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
		txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
					 (skb_headlen(skb) - fcb_len);
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
	} else {
		lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
	}

	netdev_tx_sent_queue(txq, bytes_sent);

	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we didn't grab the lock
	 * when we were reading the num_txbdfree and checking for available
	 * space, that's because outside of this function it can only grow,
	 * and once we've got needed space, it cannot suddenly disappear.
	 *
	 * The lock also protects us from gfar_error(), which can modify
	 * regs->tstat and thus retrigger the transfers, which is why we
	 * also must grab the lock before setting ready bit for the first
	 * to be transmitted BD.
	 */
	spin_lock_irqsave(&tx_queue->txlock, flags);

	/* The powerpc-specific eieio() is used, as wmb() has too strong
	 * semantics (it requires synchronization between cacheable and
	 * uncacheable mappings, which eieio doesn't provide and which we
	 * don't need), thus requiring a more expensive sync instruction. At
	 * some point, the set of architecture-independent barrier functions
	 * should be expanded to include weaker barriers.
	 */
	eieio();

	txbdp_start->lstatus = lstatus;

	eieio(); /* force lstatus write before tx_skbuff */

	tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;

	/* Update the current skb pointer to the next entry we will use
	 * (wrapping if necessary)
	 */
	tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
			      TX_RING_MOD_MASK(tx_queue->tx_ring_size);

	tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);

	/* reduce TxBD free count */
	tx_queue->num_txbdfree -= (nr_txbds);

	/* If the next BD still needs to be cleaned up, then the bds
	 * are full. We need to tell the kernel to stop sending us stuff.
	 */
	if (!tx_queue->num_txbdfree) {
		netif_tx_stop_queue(txq);

		dev->stats.tx_fifo_errors++;
	}

	/* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);

	spin_unlock_irqrestore(&tx_queue->txlock, flags);

	return NETDEV_TX_OK;
}
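/* Illustrative note (not part of the original source): for a time-stamped
 * frame the two head descriptors set up above end up as
 *
 *   txbdp_start:  bufPtr = DMA(skb->data),           len = GMAC_FCB_LEN
 *   txbdp_tstamp: bufPtr = DMA(skb->data) + fcb_len, len = skb_headlen - fcb_len
 *
 * i.e. the first BD carries only the prepended FCB and the second carries
 * the frame data, which is why nr_txbds is nr_frags + 2 when do_tstamp is
 * set.
 */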
/* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	disable_napi(priv);

	cancel_work_sync(&priv->reset_task);
	stop_gfar(dev);

	/* Disconnect from the PHY */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;

	netif_tx_stop_all_queues(dev);

	return 0;
}
/* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
{
	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}
/* Check if rx parser should be activated */
void gfar_check_rx_parser_mode(struct gfar_private *priv)
{
	struct gfar __iomem *regs;
	u32 tempval;

	regs = priv->gfargrp[0].regs;

	tempval = gfar_read(&regs->rctrl);
	/* If parse is no longer required, then disable parser */
	if (tempval & RCTRL_REQ_PARSER) {
		tempval |= RCTRL_PRSDEP_INIT;
		priv->uses_rxfcb = 1;
	} else {
		tempval &= ~RCTRL_PRSDEP_INIT;
		priv->uses_rxfcb = 0;
	}
	gfar_write(&regs->rctrl, tempval);
}
/* Enables and disables VLAN insertion/extraction */
void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = NULL;
	unsigned long flags;
	u32 tempval;

	regs = priv->gfargrp[0].regs;
	local_irq_save(flags);
	lock_rx_qs(priv);

	if (features & NETIF_F_HW_VLAN_CTAG_TX) {
		/* Enable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval |= TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	} else {
		/* Disable VLAN tag insertion */
		tempval = gfar_read(&regs->tctrl);
		tempval &= ~TCTRL_VLINS;
		gfar_write(&regs->tctrl, tempval);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX) {
		/* Enable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
		gfar_write(&regs->rctrl, tempval);
		priv->uses_rxfcb = 1;
	} else {
		/* Disable VLAN tag extraction */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~RCTRL_VLEX;
		gfar_write(&regs->rctrl, tempval);

		gfar_check_rx_parser_mode(priv);
	}

	gfar_change_mtu(dev, dev->mtu);

	unlock_rx_qs(priv);
	local_irq_restore(flags);
}
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
	int tempsize, tempval;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int oldsize = priv->rx_buffer_size;
	int frame_size = new_mtu + ETH_HLEN;

	if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
		netif_err(priv, drv, dev, "Invalid MTU setting\n");
		return -EINVAL;
	}

	if (priv->uses_rxfcb)
		frame_size += GMAC_FCB_LEN;

	frame_size += priv->padding;

	tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
		   INCREMENTAL_BUFFER_SIZE;

	/* Only stop and start the controller if it isn't already
	 * stopped, and we changed something
	 */
	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		stop_gfar(dev);

	priv->rx_buffer_size = tempsize;

	dev->mtu = new_mtu;

	gfar_write(&regs->mrblr, priv->rx_buffer_size);
	gfar_write(&regs->maxfrm, priv->rx_buffer_size);

	/* If the mtu is larger than the max size for standard
	 * ethernet frames (ie, a jumbo frame), then set maccfg2
	 * to allow huge frames, and to check the length
	 */
	tempval = gfar_read(&regs->maccfg2);

	if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
	    gfar_has_errata(priv, GFAR_ERRATA_74))
		tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
	else
		tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);

	gfar_write(&regs->maccfg2, tempval);

	if ((oldsize != tempsize) && (dev->flags & IFF_UP))
		startup_gfar(dev);

	return 0;
}
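/* Worked example (illustrative, not from the original source; assumes
 * INCREMENTAL_BUFFER_SIZE is 512 as defined in gianfar.h): for a resulting
 * frame_size of 1522 bytes, tempsize = (1522 & ~511) + 512 = 1024 + 512 =
 * 1536, so the Rx buffer size is always rounded up to the next 512-byte
 * step strictly above frame_size, even when frame_size is already aligned.
 */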
/* gfar_reset_task gets scheduled when a packet has not been
 * transmitted after a set amount of time.
 * For now, assume that clearing out all the structures, and
 * starting over will fix the problem.
 */
static void gfar_reset_task(struct work_struct *work)
{
	struct gfar_private *priv = container_of(work, struct gfar_private,
						 reset_task);
	struct net_device *dev = priv->ndev;

	if (dev->flags & IFF_UP) {
		netif_tx_stop_all_queues(dev);
		stop_gfar(dev);
		startup_gfar(dev);
		netif_tx_start_all_queues(dev);
	}

	netif_tx_schedule_all(dev);
}
static void gfar_timeout(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	dev->stats.tx_errors++;
	schedule_work(&priv->reset_task);
}
static void gfar_align_skb(struct sk_buff *skb)
{
	/* We need the data buffer to be aligned properly. We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb, RXBUF_ALIGNMENT -
		    (((unsigned long) skb->data) & (RXBUF_ALIGNMENT - 1)));
}
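/* Worked example (illustrative, not from the original source; assumes
 * RXBUF_ALIGNMENT is 64 as defined in gianfar.h): if skb->data ends in
 * 0x...28, the low bits are 0x28, so skb_reserve() skips 64 - 0x28 = 0x18
 * bytes and the buffer handed to the controller starts on a 64-byte
 * boundary.
 */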
/* Interrupt Handler for Transmit complete */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
{
	struct net_device *dev = tx_queue->dev;
	struct netdev_queue *txq;
	struct gfar_private *priv = netdev_priv(dev);
	struct txbd8 *bdp, *next = NULL;
	struct txbd8 *lbdp = NULL;
	struct txbd8 *base = tx_queue->tx_bd_base;
	struct sk_buff *skb;
	int skb_dirtytx;
	int tx_ring_size = tx_queue->tx_ring_size;
	int frags = 0, nr_txbds = 0;
	int i;
	int howmany = 0;
	int tqi = tx_queue->qindex;
	unsigned int bytes_sent = 0;
	u32 lstatus;
	size_t buflen;

	txq = netdev_get_tx_queue(dev, tqi);
	bdp = tx_queue->dirty_tx;
	skb_dirtytx = tx_queue->skb_dirtytx;

	while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
		unsigned long flags;

		frags = skb_shinfo(skb)->nr_frags;

		/* When time stamping, one additional TxBD must be freed.
		 * Also, we need to dma_unmap_single() the TxPAL.
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
			nr_txbds = frags + 2;
		else
			nr_txbds = frags + 1;

		lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);

		lstatus = lbdp->lstatus;

		/* Only clean completed frames */
		if ((lstatus & BD_LFLAG(TXBD_READY)) &&
		    (lstatus & BD_LENGTH_MASK))
			break;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			next = next_txbd(bdp, base, tx_ring_size);
			buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
		} else
			buflen = bdp->length;

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 buflen, DMA_TO_DEVICE);

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 *ns = (u64 *) (((u32)skb->data + 0x10) & ~0x7);

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			shhwtstamps.hwtstamp = ns_to_ktime(*ns);
			skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
			skb_tstamp_tx(skb, &shhwtstamps);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next;
		}

		bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
		bdp = next_txbd(bdp, base, tx_ring_size);

		for (i = 0; i < frags; i++) {
			dma_unmap_page(priv->dev, bdp->bufPtr,
				       bdp->length, DMA_TO_DEVICE);
			bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
			bdp = next_txbd(bdp, base, tx_ring_size);
		}

		bytes_sent += GFAR_CB(skb)->bytes_sent;

		dev_kfree_skb_any(skb);

		tx_queue->tx_skbuff[skb_dirtytx] = NULL;

		skb_dirtytx = (skb_dirtytx + 1) &
			      TX_RING_MOD_MASK(tx_ring_size);

		howmany++;
		spin_lock_irqsave(&tx_queue->txlock, flags);
		tx_queue->num_txbdfree += nr_txbds;
		spin_unlock_irqrestore(&tx_queue->txlock, flags);
	}

	/* If we freed a buffer, we can restart transmission, if necessary */
	if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
		netif_wake_subqueue(dev, tqi);

	/* Update dirty indicators */
	tx_queue->skb_dirtytx = skb_dirtytx;
	tx_queue->dirty_tx = bdp;

	netdev_tx_completed_queue(txq, howmany, bytes_sent);
}
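/* Note (added for clarity, not in the original source): the
 * netdev_tx_completed_queue() call above pairs with netdev_tx_sent_queue()
 * in gfar_start_xmit(); both sides use GFAR_CB(skb)->bytes_sent, so
 * byte-queue-limit accounting sees the same on-wire byte count when a frame
 * is queued and when its descriptors are reclaimed.
 */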
static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
{
	unsigned long flags;

	spin_lock_irqsave(&gfargrp->grplock, flags);
	if (napi_schedule_prep(&gfargrp->napi)) {
		gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
		__napi_schedule(&gfargrp->napi);
	} else {
		/* Clear IEVENT, so interrupts aren't called again
		 * because of the packets that have already arrived.
		 */
		gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
	}
	spin_unlock_irqrestore(&gfargrp->grplock, flags);
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			   struct sk_buff *skb)
{
	struct net_device *dev = rx_queue->dev;
	struct gfar_private *priv = netdev_priv(dev);
	dma_addr_t buf;

	buf = dma_map_single(priv->dev, skb->data,
			     priv->rx_buffer_size, DMA_FROM_DEVICE);
	gfar_init_rxbdp(rx_queue, bdp, buf);
}
static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
	if (!skb)
		return NULL;

	gfar_align_skb(skb);

	return skb;
}

struct sk_buff *gfar_new_skb(struct net_device *dev)
{
	return gfar_alloc_skb(dev);
}
static inline void count_errors(unsigned short status, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct gfar_extra_stats *estats = &priv->extra_stats;

	/* If the packet was truncated, none of the other errors matter */
	if (status & RXBD_TRUNCATED) {
		stats->rx_length_errors++;

		atomic64_inc(&estats->rx_trunc);

		return;
	}
	/* Count the errors, if there were any */
	if (status & (RXBD_LARGE | RXBD_SHORT)) {
		stats->rx_length_errors++;

		if (status & RXBD_LARGE)
			atomic64_inc(&estats->rx_large);
		else
			atomic64_inc(&estats->rx_short);
	}
	if (status & RXBD_NONOCTET) {
		stats->rx_frame_errors++;
		atomic64_inc(&estats->rx_nonoctet);
	}
	if (status & RXBD_CRCERR) {
		atomic64_inc(&estats->rx_crcerr);
		stats->rx_crc_errors++;
	}
	if (status & RXBD_OVERRUN) {
		atomic64_inc(&estats->rx_overrun);
		stats->rx_crc_errors++;
	}
}
irqreturn_t gfar_receive(int irq, void *grp_id)
{
	gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
	return IRQ_HANDLED;
}
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
{
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary.  Otherwise, it is [FIXME]
	 */
	if ((fcb->flags & RXFCB_CSUM_MASK) == (RXFCB_CIP | RXFCB_CTU))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);
}
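/* Note (added for clarity, not in the original source): RXFCB_CIP and
 * RXFCB_CTU indicate that the IP header and TCP/UDP checksums were checked
 * by the controller, while RXFCB_CSUM_MASK also covers the corresponding
 * error bits, so the equality test above only accepts "checked and no
 * error"; anything else falls through to skb_checksum_none_assert().
 */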
/* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
			       int amount_pull, struct napi_struct *napi)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct rxfcb *fcb = NULL;

	/* fcb is at the beginning if exists */
	fcb = (struct rxfcb *)skb->data;

	/* Remove the FCB from the skb
	 * Remove the padded bytes, if there are any
	 */
	if (amount_pull) {
		skb_record_rx_queue(skb, fcb->rq);
		skb_pull(skb, amount_pull);
	}

	/* Get receive timestamp from the skb */
	if (priv->hwts_rx_en) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		u64 *ns = (u64 *) skb->data;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(*ns);
	}

	if (priv->padding)
		skb_pull(skb, priv->padding);

	if (dev->features & NETIF_F_RXCSUM)
		gfar_rx_checksum(skb, fcb);

	/* Tell the skb what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* There's need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo randomly set.
	 */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX &&
	    fcb->flags & RXFCB_VLN)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), fcb->vlctl);

	/* Send the packet up the stack */
	napi_gro_receive(napi, skb);
}
/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
 * until the budget/quota has been reached. Returns the number
 * of frames handled
 */
int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
{
	struct net_device *dev = rx_queue->dev;
	struct rxbd8 *bdp, *base;
	struct sk_buff *skb;
	int pkt_len;
	int amount_pull;
	int howmany = 0;
	struct gfar_private *priv = netdev_priv(dev);

	/* Get the first full descriptor */
	bdp = rx_queue->cur_rx;
	base = rx_queue->rx_bd_base;

	amount_pull = priv->uses_rxfcb ? GMAC_FCB_LEN : 0;

	while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
		struct sk_buff *newskb;

		rmb();

		/* Add another skb for the future */
		newskb = gfar_new_skb(dev);

		skb = rx_queue->rx_skbuff[rx_queue->skb_currx];

		dma_unmap_single(priv->dev, bdp->bufPtr,
				 priv->rx_buffer_size, DMA_FROM_DEVICE);

		if (unlikely(!(bdp->status & RXBD_ERR) &&
			     bdp->length > priv->rx_buffer_size))
			bdp->status = RXBD_LARGE;

		/* We drop the frame if we failed to allocate a new buffer */
		if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
			     bdp->status & RXBD_ERR)) {
			count_errors(bdp->status, dev);

			if (unlikely(!newskb))
				newskb = skb;
			else if (skb)
				dev_kfree_skb(skb);
		} else {
			/* Increment the number of packets */
			rx_queue->stats.rx_packets++;
			howmany++;

			if (likely(skb)) {
				pkt_len = bdp->length - ETH_FCS_LEN;
				/* Remove the FCS from the packet length */
				skb_put(skb, pkt_len);
				rx_queue->stats.rx_bytes += pkt_len;
				skb_record_rx_queue(skb, rx_queue->qindex);
				gfar_process_frame(dev, skb, amount_pull,
						   &rx_queue->grp->napi);
			} else {
				netif_warn(priv, rx_err, dev, "Missing skb!\n");
				rx_queue->stats.rx_dropped++;
				atomic64_inc(&priv->extra_stats.rx_skbmissing);
			}
		}

		rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

		/* Setup the new bdp */
		gfar_new_rxbdp(rx_queue, bdp, newskb);

		/* Update to the next pointer */
		bdp = next_bd(bdp, base, rx_queue->rx_ring_size);

		/* update to point at the next skb */
		rx_queue->skb_currx = (rx_queue->skb_currx + 1) &
				      RX_RING_MOD_MASK(rx_queue->rx_ring_size);
	}

	/* Update the current rxbd pointer to be the next one */
	rx_queue->cur_rx = bdp;

	return howmany;
}
static int gfar_poll_sq(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi);
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = gfargrp->priv->tx_queue[0];
	struct gfar_priv_rx_q *rx_queue = gfargrp->priv->rx_queue[0];
	int work_done = 0;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	/* run Tx cleanup to completion */
	if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
		gfar_clean_tx_ring(tx_queue);

	work_done = gfar_clean_rx_ring(rx_queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it
		 */
		gfar_write(&regs->txic, 0);
		if (likely(tx_queue->txcoalescing))
			gfar_write(&regs->txic, tx_queue->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(rx_queue->rxcoalescing))
			gfar_write(&regs->rxic, rx_queue->rxic);
	}

	return work_done;
}
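/* Note (added for clarity, not in the original source): this single-queue
 * poll path hard-codes tx_queue[0] and rx_queue[0], so it skips the rstat
 * bitmap scan that gfar_poll() below performs; it is only suitable when the
 * interrupt group owns exactly one Tx and one Rx queue.
 */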
static int gfar_poll(struct napi_struct *napi, int budget)
{
	struct gfar_priv_grp *gfargrp =
		container_of(napi, struct gfar_priv_grp, napi);
	struct gfar_private *priv = gfargrp->priv;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int work_done = 0, work_done_per_q = 0;
	int i, budget_per_q = 0;
	int has_tx_work = 0;
	unsigned long rstat_rxf;
	int num_act_queues;

	/* Clear IEVENT, so interrupts aren't called again
	 * because of the packets that have already arrived
	 */
	gfar_write(&regs->ievent, IEVENT_RTX_MASK);

	rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;

	num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
	if (num_act_queues)
		budget_per_q = budget/num_act_queues;

	for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
		tx_queue = priv->tx_queue[i];
		/* run Tx cleanup to completion */
		if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
			gfar_clean_tx_ring(tx_queue);
			has_tx_work = 1;
		}
	}

	for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
		/* skip queue if not active */
		if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
			continue;

		rx_queue = priv->rx_queue[i];
		work_done_per_q =
			gfar_clean_rx_ring(rx_queue, budget_per_q);
		work_done += work_done_per_q;

		/* finished processing this queue */
		if (work_done_per_q < budget_per_q) {
			/* clear active queue hw indication */
			gfar_write(&regs->rstat,
				   RSTAT_CLEAR_RXF0 >> i);
			num_act_queues--;

			if (!num_act_queues)
				break;
		}
	}

	if (!num_act_queues && !has_tx_work) {

		napi_complete(napi);

		/* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);

		gfar_write(&regs->imask, IMASK_DEFAULT);

		/* If we are coalescing interrupts, update the timer
		 * Otherwise, clear it
		 */
		gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
					  gfargrp->tx_bit_map);
	}

	return work_done;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void gfar_netpoll(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	/* If the device has multiple interrupts, run tx/rx */
	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			disable_irq(gfar_irq(grp, RX)->irq);
			disable_irq(gfar_irq(grp, ER)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, ER)->irq);
			enable_irq(gfar_irq(grp, RX)->irq);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	} else {
		for (i = 0; i < priv->num_grps; i++) {
			struct gfar_priv_grp *grp = &priv->gfargrp[i];

			disable_irq(gfar_irq(grp, TX)->irq);
			gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
			enable_irq(gfar_irq(grp, TX)->irq);
		}
	}
}
#endif
/* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;

	/* Save ievent for future reference */
	u32 events = gfar_read(&gfargrp->regs->ievent);

	/* Check for reception */
	if (events & IEVENT_RX_MASK)
		gfar_receive(irq, grp_id);

	/* Check for transmit completion */
	if (events & IEVENT_TX_MASK)
		gfar_transmit(irq, grp_id);

	/* Check for errors */
	if (events & IEVENT_ERR_MASK)
		gfar_error(irq, grp_id);

	return IRQ_HANDLED;
}
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct phy_device *phydev = priv->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;

		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = mii_advertise_flowctrl(phydev->advertising);

		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}
/* Called every time the controller might need to be made
 * aware of new link state. The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	unsigned long flags;
	struct phy_device *phydev = priv->phydev;
	int new_state = 0;

	local_irq_save(flags);
	lock_tx_qs(priv);

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, dev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	unlock_tx_qs(priv);
	local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
 * addresses we subscribe to. Also, change the promiscuity of
 * the device based on the flags (this function is called
 * whenever dev->flags is changed)
 */
static void gfar_set_multi(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	if (dev->flags & IFF_PROMISC) {
		/* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
	}

	if (dev->flags & IFF_ALLMULTI) {
		/* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
	} else {
		int em_num;
		int idx;

		/* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);

		/* If we have extended hash tables, we need to
		 * clear the exact match registers to prepare for
		 * setting addresses
		 */
		if (priv->extended_hash) {
			em_num = GFAR_EM_NUM + 1;
			gfar_clear_exact_match(dev);
			idx = 1;
		} else {
			idx = 0;
			em_num = 0;
		}

		if (netdev_mc_empty(dev))
			return;

		/* Parse the list, and set the appropriate bits */
		netdev_for_each_mc_addr(ha, dev) {
			if (idx < em_num) {
				gfar_set_mac_for_addr(dev, idx, ha->addr);
				idx++;
			} else
				gfar_set_hash_for_addr(dev, ha->addr);
		}
	}
}
/* Clears each of the exact match registers to zero, so they
 * don't interfere with normal reception
 */
static void gfar_clear_exact_match(struct net_device *dev)
{
	int idx;
	static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};

	for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
		gfar_set_mac_for_addr(dev, idx, zero_arr);
}
/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 * do a CRC on it (little endian), and reverse the bits of the
 * result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 * table. The table is controlled through 8 32-bit registers:
 * gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is entry
 * 255. This means that the 3 most significant bits in the
 * hash index which gaddr register to use, and the 5 other bits
 * indicate which bit (assuming an IBM numbering scheme, which
 * for PowerPC (tm) is usually the case) in the register holds
 * the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}
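/* Worked example (illustrative, not from the original source): assuming
 * hash_width = 8, a CRC result whose top 8 bits are 0xb4 (0b10110100) gives
 * whichreg = 0b101 = 5 and whichbit = 0b10100 = 20, so the address hashes
 * to bit 20 (counting from the MSB, per the IBM bit numbering noted above)
 * of hash_regs[5].
 */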
/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	int idx;
	char tmpbuf[ETH_ALEN];
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* Now copy it into the mac registers backwards, cuz
	 * little endian is silly
	 */
	for (idx = 0; idx < ETH_ALEN; idx++)
		tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];

	gfar_write(macptr, *((u32 *) (tmpbuf)));

	tempval = *((u32 *) (tmpbuf + 4));

	gfar_write(macptr+1, tempval);
}
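/* Worked example (illustrative, not from the original source; the address
 * is hypothetical): for 00:04:9f:01:02:03, tmpbuf becomes
 * 03 02 01 9f 04 00, so the first register write stores 0x0302019f and the
 * second (macptr + 1) gets 0x0400 in its upper 16 bits; the controller
 * expects the station address byte-reversed, as the comment above notes.
 */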
/* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
{
	struct gfar_priv_grp *gfargrp = grp_id;
	struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);

	/* Magic Packet is not an error. */
	if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
	    (events & IEVENT_MAG))
		events &= ~IEVENT_MAG;

	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));

	/* Update the error counters */
	if (events & IEVENT_TXE) {
		dev->stats.tx_errors++;

		if (events & IEVENT_LC)
			dev->stats.tx_window_errors++;
		if (events & IEVENT_CRL)
			dev->stats.tx_aborted_errors++;
		if (events & IEVENT_XFUN) {
			unsigned long flags;

			netif_dbg(priv, tx_err, dev,
				  "TX FIFO underrun, packet dropped\n");
			dev->stats.tx_dropped++;
			atomic64_inc(&priv->extra_stats.tx_underrun);

			local_irq_save(flags);
			lock_tx_qs(priv);

			/* Reactivate the Tx Queues */
			gfar_write(&regs->tstat, gfargrp->tstat);

			unlock_tx_qs(priv);
			local_irq_restore(flags);
		}
		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
	}
	if (events & IEVENT_BSY) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_bsy);

		gfar_receive(irq, grp_id);

		netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
	}
	if (events & IEVENT_BABR) {
		dev->stats.rx_errors++;
		atomic64_inc(&priv->extra_stats.rx_babr);

		netif_dbg(priv, rx_err, dev, "babbling RX error\n");
	}
	if (events & IEVENT_EBERR) {
		atomic64_inc(&priv->extra_stats.eberr);
		netif_dbg(priv, rx_err, dev, "bus error\n");
	}
	if (events & IEVENT_RXC)
		netif_dbg(priv, rx_status, dev, "control frame\n");

	if (events & IEVENT_BABT) {
		atomic64_inc(&priv->extra_stats.tx_babt);
		netif_dbg(priv, tx_err, dev, "babbling TX error\n");
	}
	return IRQ_HANDLED;
}
static struct of_device_id gfar_match[] =
{
	{
		.type = "network",
		.compatible = "gianfar",
	},
	{
		.compatible = "fsl,etsec2",
	},
	{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct platform_driver gfar_driver = {
	.driver = {
		.name = "fsl-gianfar",
		.owner = THIS_MODULE,
		.pm = GFAR_PM_OPS,
		.of_match_table = gfar_match,
	},
	.probe = gfar_probe,
	.remove = gfar_remove,
};

module_platform_driver(gfar_driver);