// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Xilinx TEMAC Ethernet device
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 *
 * This is a driver for the Xilinx ll_temac ipcore which is often used
 * in the Virtex and Spartan series of chips.
 *
 * Notes:
 * - The ll_temac hardware uses indirect access for many of the TEMAC
 *   registers, including the MDIO bus. However, indirect access to MDIO
 *   registers takes considerably more clock cycles than to TEMAC registers.
 *   MDIO accesses are long, so threads doing them should probably sleep
 *   rather than busywait. However, since only one indirect access can be
 *   in progress at any given time, that means that *all* indirect accesses
 *   could end up sleeping (to wait for an MDIO access to complete).
 *   Fortunately none of the indirect accesses are on the 'hot' path for tx
 *   or rx, so this should be okay.
 *
 * TODO:
 * - Factor out locallink DMA code into separate driver
 * - Fix support for hardware checksumming.
 * - Testing. Lots and lots of testing.
 */
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>	/* needed for sizeof(tcphdr) */
#include <linux/udp.h>	/* needed for sizeof(udphdr) */
#include <linux/phy.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>

#include "ll_temac.h"
/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		64
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096
/* ---------------------------------------------------------------------
 * Low level register access functions
 */

static u32 _temac_ior_be(struct temac_local *lp, int offset)
{
	return ioread32be(lp->regs + offset);
}

static void _temac_iow_be(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32be(value, lp->regs + offset);
}

static u32 _temac_ior_le(struct temac_local *lp, int offset)
{
	return ioread32(lp->regs + offset);
}

static void _temac_iow_le(struct temac_local *lp, int offset, u32 value)
{
	return iowrite32(value, lp->regs + offset);
}
static bool hard_acs_rdy(struct temac_local *lp)
{
	return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK;
}

static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
{
	ktime_t cur = ktime_get();

	return hard_acs_rdy(lp) || ktime_after(cur, timeout);
}
/* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz
 * that was used before, and should cover MDIO bus speed down to 3200
 * Hz.
 */
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
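
/* (Illustrative arithmetic, not from the original comment: an MDIO
 * transaction is on the order of 64 MDC cycles including the preamble,
 * so at a 3200 Hz MDIO clock one access takes roughly 64 / 3200 Hz
 * = 20 ms, which is what the poll budget above allows for.)
 */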
/*
 * temac_indirect_busywait - Wait for current indirect register access
 * to complete.
 */
int temac_indirect_busywait(struct temac_local *lp)
{
	ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS);

	spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout));
	if (WARN_ON(!hard_acs_rdy(lp)))
		return -ETIMEDOUT;

	return 0;
}
/*
 * temac_indirect_in32 - Indirect register read access. This function
 * must be called without lp->indirect_lock being held.
 */
u32 temac_indirect_in32(struct temac_local *lp, int reg)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, reg);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	return val;
}
/*
 * temac_indirect_in32_locked - Indirect register read access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
{
	/* This initial wait should normally not spin, as we always
	 * try to wait for indirect access to complete before
	 * releasing the indirect_lock.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Initiate read from indirect register */
	temac_iow(lp, XTE_CTL0_OFFSET, reg);
	/* Wait for indirect register access to complete. We really
	 * should not see timeouts, and could even end up causing
	 * problems for the following indirect access, so let's make a
	 * bit of WARN noise.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return -ETIMEDOUT;
	/* Value is ready now */
	return temac_ior(lp, XTE_LSW0_OFFSET);
}
/*
 * temac_indirect_out32 - Indirect register write access. This function
 * must be called without lp->indirect_lock being held.
 */
void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, reg, value);
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
/*
 * temac_indirect_out32_locked - Indirect register write access. This
 * function must be called with lp->indirect_lock being held. Use
 * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid
 * repeated lock/unlock and to ensure uninterrupted access to indirect
 * registers.
 */
void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
{
	/* As in temac_indirect_in32_locked(), we should normally not
	 * spin here. And if it happens, we actually end up silently
	 * ignoring the write request. Ouch.
	 */
	if (WARN_ON(temac_indirect_busywait(lp)))
		return;
	/* Initiate write to indirect register */
	temac_iow(lp, XTE_LSW0_OFFSET, value);
	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
	/* As in temac_indirect_in32_locked(), we should not see timeouts
	 * here. And if it happens, we continue before the write has
	 * completed. Not good.
	 */
	WARN_ON(temac_indirect_busywait(lp));
}
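
/* (Usage sketch for the accessors above: a caller that has to touch
 * several indirect registers batches the *_locked variants under a
 * single lock acquisition instead of paying the busywait and lock cost
 * per access, e.g.:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(lp->indirect_lock, flags);
 *	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET, lsw);
 *	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET, msw);
 *	spin_unlock_irqrestore(lp->indirect_lock, flags);
 *
 * temac_do_set_mac_address() below follows exactly this pattern.)
 */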
/*
 * temac_dma_in32_* - Memory mapped DMA read, these functions expect a
 * register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_in.
 */
static u32 temac_dma_in32_be(struct temac_local *lp, int reg)
{
	return ioread32be(lp->sdma_regs + (reg << 2));
}

static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
{
	return ioread32(lp->sdma_regs + (reg << 2));
}
/*
 * temac_dma_out32_* - Memory mapped DMA write, these functions expect
 * a register input that is based on DCR word addresses which are then
 * converted to memory mapped byte addresses. To be assigned to
 * lp->dma_out.
 */
static void temac_dma_out32_be(struct temac_local *lp, int reg, u32 value)
{
	iowrite32be(value, lp->sdma_regs + (reg << 2));
}

static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
{
	iowrite32(value, lp->sdma_regs + (reg << 2));
}
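
/* (Worked example, for illustration only: the accessors above scale the
 * DCR word address by 4, so DMA register word 4 is accessed at byte
 * offset 4 << 2 = 0x10 within the memory mapped sdma_regs window.)
 */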
/* DMA register access functions can be DCR based or memory mapped.
 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
 * memory mapped.
 */
#ifdef CONFIG_PPC_DCR

/*
 * temac_dma_dcr_in32 - DCR based DMA read
 */
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
{
	return dcr_read(lp->sdma_dcrs, reg);
}

/*
 * temac_dma_dcr_out32 - DCR based DMA write
 */
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
{
	dcr_write(lp->sdma_dcrs, reg, value);
}
/*
 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
 * I/O functions.
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	unsigned int dcrs;

	/* setup the dcr address mapping if it's in the device tree */

	dcrs = dcr_resource_start(np, 0);
	if (dcrs != 0) {
		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
		lp->dma_in = temac_dma_dcr_in;
		lp->dma_out = temac_dma_dcr_out;
		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
		return 0;
	}
	/* no DCR in the device tree, indicate a failure */
	return -1;
}
#else

/*
 * temac_dcr_setup - This is a stub for when DCR is not supported,
 * such as with MicroBlaze and x86.
 */
static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
			   struct device_node *np)
{
	return -1;
}

#endif
/*
 * temac_dma_bd_release - Release buffer descriptor rings
 */
static void temac_dma_bd_release(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	int i;

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

	for (i = 0; i < lp->rx_bd_num; i++) {
		if (!lp->rx_skb[i])
			break;
		dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb(lp->rx_skb[i]);
	}
	if (lp->rx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
				  lp->rx_bd_v, lp->rx_bd_p);
	if (lp->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
				  lp->tx_bd_v, lp->tx_bd_p);
}
/*
 * temac_dma_bd_init - Setup buffer descriptor rings
 */
static int temac_dma_bd_init(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t skb_dma_addr;
	int i;

	lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
				  sizeof(*lp->rx_skb), GFP_KERNEL);
	if (!lp->rx_skb)
		goto out;

	/* allocate the tx and rx ring buffer descriptors. */
	/* returns a virtual address and a physical address. */
	lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		goto out;

	lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
			+ sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
			+ sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

		skb = netdev_alloc_skb_ip_align(ndev,
						XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb)
			goto out;

		lp->rx_skb[i] = skb;
		/* returns physical address of skb->data */
		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
			goto out;
		lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
		lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
	}

	/* Configure DMA channel (irq setup) */
	lp->dma_out(lp, TX_CHNL_CTRL,
		    lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
		    0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
	lp->dma_out(lp, RX_CHNL_CTRL,
		    lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
		    CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
		    CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

	/* Init descriptor indexes */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;
	lp->rx_bd_tail = lp->rx_bd_num - 1;

	/* Enable RX DMA transfers */
	wmb();
	lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
	lp->dma_out(lp, RX_TAILDESC_PTR,
		    lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));

	/* Prepare for TX DMA transfer */
	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);

	return 0;

out:
	temac_dma_bd_release(ndev);
	return -ENOMEM;
}
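
/* (Illustrative note on the ring setup above: the descriptor rings are
 * circular, so with TX_BD_NUM_DEFAULT = 64 descriptor 62's next pointer
 * refers to descriptor 63 and descriptor 63's next wraps back to
 * descriptor 0, i.e. next index = (i + 1) % 64.)
 */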
/* ---------------------------------------------------------------------
 * net_device_ops
 */

static void temac_do_set_mac_address(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;

	/* Set up the unicast MAC address filter with the MAC address */
	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET,
				    (ndev->dev_addr[0]) |
				    (ndev->dev_addr[1] << 8) |
				    (ndev->dev_addr[2] << 16) |
				    (ndev->dev_addr[3] << 24));
	/* There are reserved bits in EUAW1, so don't affect them.
	 * Set MAC bits [47:32] in EUAW1.
	 */
	temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET,
				    (ndev->dev_addr[4] & 0x000000ff) |
				    (ndev->dev_addr[5] << 8));
	spin_unlock_irqrestore(lp->indirect_lock, flags);
}
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
	ether_addr_copy(ndev->dev_addr, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);
	temac_do_set_mac_address(ndev);
	return 0;
}
static int temac_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
	temac_do_set_mac_address(ndev);
	return 0;
}
static void temac_set_multicast_list(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 multi_addr_msw, multi_addr_lsw;
	int i = 0;
	unsigned long flags;
	bool promisc_mode_disabled = false;

	if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
	    (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) {
		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
		return;
	}

	spin_lock_irqsave(lp->indirect_lock, flags);

	if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_mc_addr(ha, ndev) {
			if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM))
				break;
			multi_addr_msw = ((ha->addr[3] << 24) |
					  (ha->addr[2] << 16) |
					  (ha->addr[1] << 8) |
					  (ha->addr[0]));
			temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET,
						    multi_addr_msw);
			multi_addr_lsw = ((ha->addr[5] << 8) |
					  (ha->addr[4]) | (i << 16));
			temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET,
						    multi_addr_lsw);
			i++;
		}
	}

	/* Clear all or remaining/unused address table entries */
	while (i < MULTICAST_CAM_TABLE_NUM) {
		temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0);
		temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16);
		i++;
	}

	/* Enable address filter block if currently disabled */
	if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET)
	    & XTE_AFM_EPPRM_MASK) {
		temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0);
		promisc_mode_disabled = true;
	}

	spin_unlock_irqrestore(lp->indirect_lock, flags);

	if (promisc_mode_disabled)
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
static struct temac_option {
	int flg;
	u32 opt;
	u32 reg;
	u32 m_or;
	u32 m_and;
} temac_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXJMBO_MASK,
	},
	{
		.opt = XTE_OPTION_JUMBO,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXJMBO_MASK,
	},
	/* Turn on VLAN packet support for both Rx and Tx */
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXVLAN_MASK,
	},
	{
		.opt = XTE_OPTION_VLAN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXVLAN_MASK,
	},
	/* Turn on FCS stripping on receive packets */
	{
		.opt = XTE_OPTION_FCS_STRIP,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXFCS_MASK,
	},
	/* Turn on FCS insertion on transmit packets */
	{
		.opt = XTE_OPTION_FCS_INSERT,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXFCS_MASK,
	},
	/* Turn on length/type field checking on receive packets */
	{
		.opt = XTE_OPTION_LENTYPE_ERR,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXLT_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_RXFLO_MASK,
	},
	/* Turn on flow control */
	{
		.opt = XTE_OPTION_FLOW_CONTROL,
		.reg = XTE_FCC_OFFSET,
		.m_or = XTE_FCC_TXFLO_MASK,
	},
	/* Turn on promiscuous frame filtering (all frames are received) */
	{
		.opt = XTE_OPTION_PROMISC,
		.reg = XTE_AFM_OFFSET,
		.m_or = XTE_AFM_EPPRM_MASK,
	},
	/* Enable transmitter if not already enabled */
	{
		.opt = XTE_OPTION_TXEN,
		.reg = XTE_TXC_OFFSET,
		.m_or = XTE_TXC_TXEN_MASK,
	},
	/* Enable receiver? */
	{
		.opt = XTE_OPTION_RXEN,
		.reg = XTE_RXC1_OFFSET,
		.m_or = XTE_RXC1_RXEN_MASK,
	},
	{}
};
static u32 temac_setoptions(struct net_device *ndev, u32 options)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct temac_option *tp = &temac_options[0];
	int reg;
	unsigned long flags;

	spin_lock_irqsave(lp->indirect_lock, flags);
	while (tp->opt) {
		reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or;
		if (options & tp->opt) {
			reg |= tp->m_or;
			temac_indirect_out32_locked(lp, tp->reg, reg);
		}
		tp++;
	}
	spin_unlock_irqrestore(lp->indirect_lock, flags);
	lp->options |= options;

	return 0;
}
/* Initialize temac */
static void temac_device_reset(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	u32 timeout;
	u32 val;
	unsigned long flags;

	/* Perform a software reset */

	/* 0x300 host enable bit ? */
	/* reset PHY through control register ?:1 */

	dev_dbg(&ndev->dev, "%s()\n", __func__);

	/* Reset the receiver and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_RXC1_OFFSET) & XTE_RXC1_RXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset RX reset timeout!!\n");
			break;
		}
	}

	/* Reset the transmitter and wait for it to finish reset */
	temac_indirect_out32(lp, XTE_TXC_OFFSET, XTE_TXC_TXRST_MASK);
	timeout = 1000;
	while (temac_indirect_in32(lp, XTE_TXC_OFFSET) & XTE_TXC_TXRST_MASK) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset TX reset timeout!!\n");
			break;
		}
	}

	/* Disable the receiver */
	spin_lock_irqsave(lp->indirect_lock, flags);
	val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET,
				    val & ~XTE_RXC1_RXEN_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Reset Local Link (DMA) */
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
	timeout = 1000;
	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
		udelay(1);
		if (--timeout == 0) {
			dev_err(&ndev->dev,
				"temac_device_reset DMA reset timeout!!\n");
			break;
		}
	}
	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);

	if (temac_dma_bd_init(ndev)) {
		dev_err(&ndev->dev,
			"temac_device_reset descriptor allocation failed\n");
	}

	spin_lock_irqsave(lp->indirect_lock, flags);
	temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0);
	temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK);
	spin_unlock_irqrestore(lp->indirect_lock, flags);

	/* Sync default options with HW
	 * but leave receiver and transmitter disabled.
	 */
	temac_setoptions(ndev,
			 lp->options & ~(XTE_OPTION_TXEN | XTE_OPTION_RXEN));

	temac_do_set_mac_address(ndev);

	/* Set address filter table */
	temac_set_multicast_list(ndev);
	if (temac_setoptions(ndev, lp->options))
		dev_err(&ndev->dev, "Error setting TEMAC options\n");

	/* Init Driver variable */
	netif_trans_update(ndev); /* prevent tx timeout */
}
static void temac_adjust_link(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phy = ndev->phydev;
	u32 mii_speed;
	int link_state;
	unsigned long flags;

	/* hash together the state values to decide if something has changed */
	link_state = phy->speed | (phy->duplex << 1) | phy->link;

	if (lp->last_link != link_state) {
		spin_lock_irqsave(lp->indirect_lock, flags);
		mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET);
		mii_speed &= ~XTE_EMCFG_LINKSPD_MASK;

		switch (phy->speed) {
		case SPEED_1000: mii_speed |= XTE_EMCFG_LINKSPD_1000; break;
		case SPEED_100: mii_speed |= XTE_EMCFG_LINKSPD_100; break;
		case SPEED_10: mii_speed |= XTE_EMCFG_LINKSPD_10; break;
		}

		/* Write new speed setting out to TEMAC */
		temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed);
		spin_unlock_irqrestore(lp->indirect_lock, flags);

		lp->last_link = link_state;
		phy_print_status(phy);
	}
}
#ifdef CONFIG_64BIT

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app3 = (u32)(((u64)p) >> 32);
	bd->app4 = (u32)((u64)p & 0xFFFFFFFF);
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(((u64)(bd->app3) << 32) | bd->app4);
}

#else

static void ptr_to_txbd(void *p, struct cdmac_bd *bd)
{
	bd->app4 = (u32)p;
}

static void *ptr_from_txbd(struct cdmac_bd *bd)
{
	return (void *)(bd->app4);
}

#endif
static void temac_start_xmit_done(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	unsigned int stat = 0;
	struct sk_buff *skb;

	cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
	stat = be32_to_cpu(cur_p->app0);

	while (stat & STS_CTRL_APP0_CMPLT) {
		dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
				 be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
		skb = (struct sk_buff *)ptr_from_txbd(cur_p);
		if (skb)
			dev_consume_skb_irq(skb);
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;

		ndev->stats.tx_packets++;
		ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

		lp->tx_bd_ci++;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
		stat = be32_to_cpu(cur_p->app0);
	}

	/* Matches barrier in temac_start_xmit */
	smp_mb();

	netif_wake_queue(ndev);
}
static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
{
	struct cdmac_bd *cur_p;
	int tail;

	tail = lp->tx_bd_tail;
	cur_p = &lp->tx_bd_v[tail];

	do {
		if (cur_p->app0)
			return NETDEV_TX_BUSY;

		tail++;
		if (tail >= lp->tx_bd_num)
			tail = 0;

		cur_p = &lp->tx_bd_v[tail];
		num_frag--;
	} while (num_frag >= 0);

	return 0;
}
static netdev_tx_t
temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct cdmac_bd *cur_p;
	dma_addr_t tail_p, skb_dma_addr;
	int ii;
	unsigned long num_frag;
	skb_frag_t *frag;

	num_frag = skb_shinfo(skb)->nr_frags;
	frag = &skb_shinfo(skb)->frags[0];
	cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

	if (temac_check_tx_bd_space(lp, num_frag + 1)) {
		if (netif_queue_stopped(ndev))
			return NETDEV_TX_BUSY;

		netif_stop_queue(ndev);

		/* Matches barrier in temac_start_xmit_done */
		smp_mb();

		/* Space might have just been freed - check again */
		if (temac_check_tx_bd_space(lp, num_frag))
			return NETDEV_TX_BUSY;

		netif_wake_queue(ndev);
	}

	cur_p->app0 = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned int csum_start_off = skb_checksum_start_offset(skb);
		unsigned int csum_index_off = csum_start_off + skb->csum_offset;

		cur_p->app0 |= cpu_to_be32(0x000001); /* TX Checksum Enabled */
		cur_p->app1 = cpu_to_be32((csum_start_off << 16)
					  | csum_index_off);
		cur_p->app2 = 0;  /* initial checksum seed */
	}

	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_SOP);
	skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
				      skb_headlen(skb), DMA_TO_DEVICE);
	cur_p->len = cpu_to_be32(skb_headlen(skb));
	if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	cur_p->phys = cpu_to_be32(skb_dma_addr);
	ptr_to_txbd((void *)skb, cur_p);

	for (ii = 0; ii < num_frag; ii++) {
		if (++lp->tx_bd_tail >= lp->tx_bd_num)
			lp->tx_bd_tail = 0;

		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
		skb_dma_addr = dma_map_single(ndev->dev.parent,
					      skb_frag_address(frag),
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
			if (--lp->tx_bd_tail < 0)
				lp->tx_bd_tail = lp->tx_bd_num - 1;
			cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			while (--ii >= 0) {
				--frag;
				dma_unmap_single(ndev->dev.parent,
						 be32_to_cpu(cur_p->phys),
						 skb_frag_size(frag),
						 DMA_TO_DEVICE);
				if (--lp->tx_bd_tail < 0)
					lp->tx_bd_tail = lp->tx_bd_num - 1;
				cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
			}
			dma_unmap_single(ndev->dev.parent,
					 be32_to_cpu(cur_p->phys),
					 skb_headlen(skb), DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			ndev->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		cur_p->phys = cpu_to_be32(skb_dma_addr);
		cur_p->len = cpu_to_be32(skb_frag_size(frag));
		cur_p->app0 = 0;
		frag++;
	}
	cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
	lp->tx_bd_tail++;
	if (lp->tx_bd_tail >= lp->tx_bd_num)
		lp->tx_bd_tail = 0;

	skb_tx_timestamp(skb);

	/* Kick off the transfer */
	wmb();
	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

	return NETDEV_TX_OK;
}
static int ll_temac_recv_buffers_available(struct temac_local *lp)
{
	int available;

	if (!lp->rx_skb[lp->rx_bd_ci])
		return 0;
	available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
	if (available <= 0)
		available += lp->rx_bd_num;
	return available;
}
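
/* (Illustrative example of the wrap-around above: with rx_bd_num = 1024,
 * rx_bd_ci = 1020 and rx_bd_tail = 3, the difference 1 + 3 - 1020 is
 * negative, so adding rx_bd_num gives 8 available descriptors, which
 * matches walking from index 1020 forward and wrapping around to 3.)
 */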
static void ll_temac_recv(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	unsigned long flags;
	int rx_bd;
	bool update_tail = false;

	spin_lock_irqsave(&lp->rx_lock, flags);

	/* Process all received buffers, passing them on to the network
	 * stack. After this, the buffer descriptors will be in an
	 * un-allocated stage, where no skb is allocated for it, and
	 * they are therefore not available for TEMAC/DMA.
	 */
	do {
		struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
		struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
		unsigned int bdstat = be32_to_cpu(bd->app0);
		int length;

		/* While this should not normally happen, we can end
		 * here when GFP_ATOMIC allocations fail, and we
		 * therefore have un-allocated buffers.
		 */
		if (!skb)
			break;

		/* Loop over all completed buffer descriptors */
		if (!(bdstat & STS_CTRL_APP0_CMPLT))
			break;

		dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
				 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
		/* The buffer is not valid for DMA anymore */
		bd->phys = 0;
		bd->len = 0;

		length = be32_to_cpu(bd->app4) & 0x3FFF;
		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* if we're doing rx csum offload, set it up */
		if (((lp->temac_features & TEMAC_FEATURE_RX_CSUM) != 0) &&
		    (skb->protocol == htons(ETH_P_IP)) &&
		    (skb->len > 64)) {
			/* Convert from device endianness (be32) to cpu
			 * endianness, and if necessary swap the bytes
			 * (back) for proper IP checksum byte order
			 * (be16).
			 */
			skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		if (!skb_defer_rx_timestamp(skb))
			netif_rx(skb);
		/* The skb buffer is now owned by network stack above */
		lp->rx_skb[lp->rx_bd_ci] = NULL;

		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += length;

		rx_bd = lp->rx_bd_ci;
		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
	} while (rx_bd != lp->rx_bd_tail);

	/* DMA operations will halt when the last buffer descriptor is
	 * processed (ie. the one pointed to by RX_TAILDESC_PTR).
	 * When that happens, no more interrupt events will be
	 * generated. No IRQ_COAL or IRQ_DLY, and not even an
	 * IRQ_ERR. To avoid stalling, we schedule a delayed work
	 * when there is a potential risk of that happening. The work
	 * will call this function, and thus re-schedule itself until
	 * enough buffers are available again.
	 */
	if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
		schedule_delayed_work(&lp->restart_work, HZ / 1000);

	/* Allocate new buffers for those buffer descriptors that were
	 * passed to network stack. Note that GFP_ATOMIC allocations
	 * can fail (e.g. when a larger burst of GFP_ATOMIC
	 * allocations occurs), so while we try to allocate all
	 * buffers in the same interrupt where they were processed, we
	 * continue with what we could get in case of allocation
	 * failure. Allocation of remaining buffers will be retried
	 * in following calls.
	 */
	while (1) {
		struct sk_buff *skb;
		struct cdmac_bd *bd;
		dma_addr_t skb_dma_addr;

		rx_bd = lp->rx_bd_tail + 1;
		if (rx_bd >= lp->rx_bd_num)
			rx_bd = 0;
		bd = &lp->rx_bd_v[rx_bd];

		if (bd->phys)
			break;	/* All skb's allocated */

		skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
		if (!skb) {
			dev_warn(&ndev->dev, "skb alloc failed\n");
			break;
		}

		skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
					      XTE_MAX_JUMBO_FRAME_SIZE,
					      DMA_FROM_DEVICE);
		if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
						   skb_dma_addr))) {
			dev_kfree_skb_any(skb);
			break;
		}

		bd->phys = cpu_to_be32(skb_dma_addr);
		bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
		bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
		lp->rx_skb[rx_bd] = skb;

		lp->rx_bd_tail = rx_bd;
		update_tail = true;
	}

	/* Move tail pointer when buffers have been allocated */
	if (update_tail) {
		lp->dma_out(lp, RX_TAILDESC_PTR,
			    lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
	}

	spin_unlock_irqrestore(&lp->rx_lock, flags);
}
/* Function scheduled to ensure a restart in case of DMA halt
 * condition caused by running out of buffer descriptors.
 */
static void ll_temac_restart_work_func(struct work_struct *work)
{
	struct temac_local *lp = container_of(work, struct temac_local,
					      restart_work.work);
	struct net_device *ndev = lp->ndev;

	ll_temac_recv(ndev);
}
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	status = lp->dma_in(lp, TX_IRQ_REG);
	lp->dma_out(lp, TX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		temac_start_xmit_done(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "TX error 0x%x TX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, TX_CHNL_STS));

	return IRQ_HANDLED;
}
static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct temac_local *lp = netdev_priv(ndev);
	unsigned int status;

	/* Read and clear the status registers */
	status = lp->dma_in(lp, RX_IRQ_REG);
	lp->dma_out(lp, RX_IRQ_REG, status);

	if (status & (IRQ_COAL | IRQ_DLY))
		ll_temac_recv(lp->ndev);
	if (status & (IRQ_ERR | IRQ_DMAERR))
		dev_err_ratelimited(&ndev->dev,
				    "RX error 0x%x RX_CHNL_STS=0x%08x\n",
				    status, lp->dma_in(lp, RX_CHNL_STS));

	return IRQ_HANDLED;
}
static int temac_open(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = NULL;
	int rc;

	dev_dbg(&ndev->dev, "temac_open()\n");

	if (lp->phy_node) {
		phydev = of_phy_connect(lp->ndev, lp->phy_node,
					temac_adjust_link, 0, 0);
		if (!phydev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(phydev);
	} else if (strlen(lp->phy_name) > 0) {
		phydev = phy_connect(lp->ndev, lp->phy_name, temac_adjust_link,
				     lp->phy_interface);
		if (IS_ERR(phydev)) {
			dev_err(lp->dev, "phy_connect() failed\n");
			return PTR_ERR(phydev);
		}
		phy_start(phydev);
	}

	temac_device_reset(ndev);

	rc = request_irq(lp->tx_irq, ll_temac_tx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_tx_irq;
	rc = request_irq(lp->rx_irq, ll_temac_rx_irq, 0, ndev->name, ndev);
	if (rc)
		goto err_rx_irq;

	return 0;

 err_rx_irq:
	free_irq(lp->tx_irq, ndev);
 err_tx_irq:
	if (phydev)
		phy_disconnect(phydev);
	dev_err(lp->dev, "request_irq() failed\n");
	return rc;
}
static int temac_stop(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	dev_dbg(&ndev->dev, "temac_close()\n");

	cancel_delayed_work_sync(&lp->restart_work);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (phydev)
		phy_disconnect(phydev);

	temac_dma_bd_release(ndev);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->tx_irq, ndev);
	ll_temac_tx_irq(lp->rx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif
static const struct net_device_ops temac_netdev_ops = {
	.ndo_open = temac_open,
	.ndo_stop = temac_stop,
	.ndo_start_xmit = temac_start_xmit,
	.ndo_set_rx_mode = temac_set_multicast_list,
	.ndo_set_mac_address = temac_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = phy_do_ioctl_running,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = temac_poll_controller,
#endif
};
/* ---------------------------------------------------------------------
 * SYSFS device attributes
 */
static ssize_t temac_show_llink_regs(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct temac_local *lp = netdev_priv(ndev);
	int i, len = 0;

	for (i = 0; i < 0x11; i++)
		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
			       (i % 8) == 7 ? "\n" : " ");
	len += sprintf(buf + len, "\n");

	return len;
}

static DEVICE_ATTR(llink_regs, 0440, temac_show_llink_regs, NULL);

static struct attribute *temac_device_attrs[] = {
	&dev_attr_llink_regs.attr,
	NULL,
};

static const struct attribute_group temac_attr_group = {
	.attrs = temac_device_attrs,
};
/* ---------------------------------------------------------------------
 * ethtool support
 */

static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
					    struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}
static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
					   struct ethtool_ringparam *ering)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
					  struct ethtool_coalesce *ec)
{
	struct temac_local *lp = netdev_priv(ndev);

	ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
	ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
	return 0;
}
static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
					  struct ethtool_coalesce *ec)
{
	struct temac_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ec->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
	if (ec->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
	/* With typical LocalLink clock speed of 200 MHz and
	 * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
	 */
	if (ec->rx_coalesce_usecs)
		lp->coalesce_delay_rx =
			min(255U, (ec->rx_coalesce_usecs * 100) / 512);
	if (ec->tx_coalesce_usecs)
		lp->coalesce_delay_tx =
			min(255U, (ec->tx_coalesce_usecs * 100) / 512);

	return 0;
}
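
/* (Worked example of the conversion above, for illustration: one delay
 * count is (1023 + 1) / 200 MHz = 5.12 us, so a requested
 * rx_coalesce_usecs of 100 maps to (100 * 100) / 512 = 19 delay counts,
 * i.e. roughly 97 us of actual hardware delay.)
 */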
static const struct ethtool_ops temac_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES,
	.nway_reset = phy_ethtool_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
	.get_ringparam = ll_temac_ethtools_get_ringparam,
	.set_ringparam = ll_temac_ethtools_set_ringparam,
	.get_coalesce = ll_temac_ethtools_get_coalesce,
	.set_coalesce = ll_temac_ethtools_set_coalesce,
};
static int temac_probe(struct platform_device *pdev)
{
	struct ll_temac_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *temac_np = dev_of_node(&pdev->dev), *dma_np;
	struct temac_local *lp;
	struct net_device *ndev;
	struct resource *res;
	const void *addr;
	__be32 *p;
	bool little_endian;
	int rc = 0;

	/* Init network device structure */
	ndev = devm_alloc_etherdev(&pdev->dev, sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &temac_netdev_ops;
	ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
	ndev->features |= NETIF_F_IP_CSUM; /* Can checksum TCP/UDP over IPv4. */
	ndev->features |= NETIF_F_HW_CSUM; /* Can checksum all the packets. */
	ndev->features |= NETIF_F_IPV6_CSUM; /* Can checksum IPV6 TCP/UDP */
	ndev->features |= NETIF_F_HIGHDMA; /* Can DMA to high memory. */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; /* Transmit VLAN hw accel */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; /* Receive VLAN hw acceleration */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; /* Receive VLAN filtering */
	ndev->features |= NETIF_F_VLAN_CHALLENGED; /* cannot handle VLAN pkts */
	ndev->features |= NETIF_F_GSO; /* Enable software GSO. */
	ndev->features |= NETIF_F_MULTI_QUEUE; /* Has multiple TX/RX queues */
	ndev->features |= NETIF_F_LRO; /* large receive offload */
#endif

	/* setup temac private info structure */
	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XTE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;
	spin_lock_init(&lp->rx_lock);
	INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);

	/* Setup mutex for synchronization of indirect register access */
	if (pdata) {
		if (!pdata->indirect_lock) {
			dev_err(&pdev->dev,
				"indirect_lock missing in platform_data\n");
			return -EINVAL;
		}
		lp->indirect_lock = pdata->indirect_lock;
	} else {
		lp->indirect_lock = devm_kmalloc(&pdev->dev,
						 sizeof(*lp->indirect_lock),
						 GFP_KERNEL);
		spin_lock_init(lp->indirect_lock);
	}

	/* map device registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	lp->regs = devm_ioremap(&pdev->dev, res->start,
				resource_size(res));
	if (IS_ERR(lp->regs)) {
		dev_err(&pdev->dev, "could not map TEMAC registers\n");
		return PTR_ERR(lp->regs);
	}

	/* Select register access functions with the specified
	 * endianness mode. Default for OF devices is big-endian.
	 */
	little_endian = false;
	if (temac_np) {
		if (of_get_property(temac_np, "little-endian", NULL))
			little_endian = true;
	} else if (pdata) {
		little_endian = pdata->reg_little_endian;
	}
	if (little_endian) {
		lp->temac_ior = _temac_ior_le;
		lp->temac_iow = _temac_iow_le;
	} else {
		lp->temac_ior = _temac_ior_be;
		lp->temac_iow = _temac_iow_be;
	}

	/* Setup checksum offload, but default to off if not specified */
	lp->temac_features = 0;
	if (temac_np) {
		p = (__be32 *)of_get_property(temac_np, "xlnx,txcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		p = (__be32 *)of_get_property(temac_np, "xlnx,rxcsum", NULL);
		if (p && be32_to_cpu(*p))
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	} else if (pdata) {
		if (pdata->txcsum)
			lp->temac_features |= TEMAC_FEATURE_TX_CSUM;
		if (pdata->rxcsum)
			lp->temac_features |= TEMAC_FEATURE_RX_CSUM;
	}
	if (lp->temac_features & TEMAC_FEATURE_TX_CSUM)
		/* Can checksum TCP/UDP over IPv4. */
		ndev->features |= NETIF_F_IP_CSUM;

	/* Defaults for IRQ delay/coalescing setup. These are
	 * configuration values, so do not belong in the device-tree.
	 */
	lp->coalesce_delay_tx = 0x10;
	lp->coalesce_count_tx = 0x22;
	lp->coalesce_delay_rx = 0xff;
	lp->coalesce_count_rx = 0x07;

	/* Setup LocalLink DMA */
	if (temac_np) {
		/* Find the DMA node, map the DMA registers, and
		 * decode the DMA IRQs.
		 */
		dma_np = of_parse_phandle(temac_np, "llink-connected", 0);
		if (!dma_np) {
			dev_err(&pdev->dev, "could not find DMA node\n");
			return -ENODEV;
		}

		/* Setup the DMA register accesses, could be DCR or
		 * memory mapped.
		 */
		if (temac_dcr_setup(lp, pdev, dma_np)) {
			/* no DCR in the device tree, try non-DCR */
			lp->sdma_regs = devm_of_iomap(&pdev->dev, dma_np, 0,
						      NULL);
			if (IS_ERR(lp->sdma_regs)) {
				dev_err(&pdev->dev,
					"unable to map DMA registers\n");
				of_node_put(dma_np);
				return PTR_ERR(lp->sdma_regs);
			}
			if (of_get_property(dma_np, "little-endian", NULL)) {
				lp->dma_in = temac_dma_in32_le;
				lp->dma_out = temac_dma_out32_le;
			} else {
				lp->dma_in = temac_dma_in32_be;
				lp->dma_out = temac_dma_out32_be;
			}
			dev_dbg(&pdev->dev, "MEM base: %p\n", lp->sdma_regs);
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
		lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

		/* Finished with the DMA node; drop the reference */
		of_node_put(dma_np);
	} else if (pdata) {
		/* 2nd memory resource specifies DMA registers */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		lp->sdma_regs = devm_ioremap(&pdev->dev, res->start,
					     resource_size(res));
		if (IS_ERR(lp->sdma_regs)) {
			dev_err(&pdev->dev,
				"could not map DMA registers\n");
			return PTR_ERR(lp->sdma_regs);
		}
		if (pdata->dma_little_endian) {
			lp->dma_in = temac_dma_in32_le;
			lp->dma_out = temac_dma_out32_le;
		} else {
			lp->dma_in = temac_dma_in32_be;
			lp->dma_out = temac_dma_out32_be;
		}

		/* Get DMA RX and TX interrupts */
		lp->rx_irq = platform_get_irq(pdev, 0);
		lp->tx_irq = platform_get_irq(pdev, 1);

		/* IRQ delay/coalescing setup */
		if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
			lp->coalesce_delay_tx = pdata->tx_irq_timeout;
			lp->coalesce_count_tx = pdata->tx_irq_count;
		}
		if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
			lp->coalesce_delay_rx = pdata->rx_irq_timeout;
			lp->coalesce_count_rx = pdata->rx_irq_count;
		}
	}

	/* Error handle returned DMA RX and TX interrupts */
	if (lp->rx_irq < 0) {
		if (lp->rx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA RX irq\n");
		return lp->rx_irq;
	}
	if (lp->tx_irq < 0) {
		if (lp->tx_irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "could not get DMA TX irq\n");
		return lp->tx_irq;
	}

	if (temac_np) {
		/* Retrieve the MAC address */
		addr = of_get_mac_address(temac_np);
		if (IS_ERR(addr)) {
			dev_err(&pdev->dev, "could not find MAC address\n");
			return -ENODEV;
		}
		temac_init_mac_address(ndev, addr);
	} else if (pdata) {
		temac_init_mac_address(ndev, pdata->mac_addr);
	}

	rc = temac_mdio_setup(lp, pdev);
	if (rc)
		dev_warn(&pdev->dev, "error registering MDIO bus\n");

	if (temac_np) {
		lp->phy_node = of_parse_phandle(temac_np, "phy-handle", 0);
		if (lp->phy_node)
			dev_dbg(lp->dev, "using PHY node %pOF\n", temac_np);
	} else if (pdata) {
		snprintf(lp->phy_name, sizeof(lp->phy_name),
			 PHY_ID_FMT, lp->mii_bus->id, pdata->phy_addr);
		lp->phy_interface = pdata->phy_interface;
	}

	/* Add the device attributes */
	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
	if (rc) {
		dev_err(lp->dev, "Error creating sysfs files\n");
		goto err_sysfs_create;
	}

	rc = register_netdev(lp->ndev);
	if (rc) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", rc);
		goto err_register_ndev;
	}

	return 0;

err_register_ndev:
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
err_sysfs_create:
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return rc;
}
static int temac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct temac_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);
	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
	if (lp->phy_node)
		of_node_put(lp->phy_node);
	temac_mdio_teardown(lp);
	return 0;
}
static const struct of_device_id temac_of_match[] = {
	{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
	{ .compatible = "xlnx,xps-ll-temac-2.00.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.02.a", },
	{ .compatible = "xlnx,xps-ll-temac-2.03.a", },
	{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);
static struct platform_driver temac_driver = {
	.probe = temac_probe,
	.remove = temac_remove,
	.driver = {
		.name = "xilinx_temac",
		.of_match_table = temac_of_match,
	},
};

module_platform_driver(temac_driver);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
MODULE_LICENSE("GPL");