2 * Driver for Xilinx TEMAC Ethernet device
4 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
5 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
6 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8 * This is a driver for the Xilinx ll_temac ipcore which is often used
9 * in the Virtex and Spartan series of chips.
12 * - The ll_temac hardware uses indirect access for many of the TEMAC
13 * registers, include the MDIO bus. However, indirect access to MDIO
14 * registers take considerably more clock cycles than to TEMAC registers.
15 * MDIO accesses are long, so threads doing them should probably sleep
16 * rather than busywait. However, since only one indirect access can be
17 * in progress at any given time, that means that *all* indirect accesses
18 * could end up sleeping (to wait for an MDIO access to complete).
19 * Fortunately none of the indirect accesses are on the 'hot' path for tx
20 * or rx, so this should be okay.
23 * - Fix driver to work on more than just Virtex5. Right now the driver
24 * assumes that the locallink DMA registers are accessed via DCR
26 * - Factor out locallink DMA code into separate driver
27 * - Fix multicast assignment.
28 * - Fix support for hardware checksumming.
29 * - Testing. Lots and lots of testing.
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/tcp.h>      /* needed for sizeof(tcphdr) */
#include <linux/udp.h>      /* needed for sizeof(udphdr) */
58 /* ---------------------------------------------------------------------
59 * Low level register access functions
62 u32
temac_ior(struct temac_local
*lp
, int offset
)
64 return in_be32((u32
*)(lp
->regs
+ offset
));
67 void temac_iow(struct temac_local
*lp
, int offset
, u32 value
)
69 out_be32((u32
*) (lp
->regs
+ offset
), value
);
72 int temac_indirect_busywait(struct temac_local
*lp
)
74 long end
= jiffies
+ 2;
76 while (!(temac_ior(lp
, XTE_RDY0_OFFSET
) & XTE_RDY0_HARD_ACS_RDY_MASK
)) {
77 if (end
- jiffies
<= 0) {
89 * lp->indirect_mutex must be held when calling this function
91 u32
temac_indirect_in32(struct temac_local
*lp
, int reg
)
95 if (temac_indirect_busywait(lp
))
97 temac_iow(lp
, XTE_CTL0_OFFSET
, reg
);
98 if (temac_indirect_busywait(lp
))
100 val
= temac_ior(lp
, XTE_LSW0_OFFSET
);
106 * temac_indirect_out32
108 * lp->indirect_mutex must be held when calling this function
110 void temac_indirect_out32(struct temac_local
*lp
, int reg
, u32 value
)
112 if (temac_indirect_busywait(lp
))
114 temac_iow(lp
, XTE_LSW0_OFFSET
, value
);
115 temac_iow(lp
, XTE_CTL0_OFFSET
, CNTLREG_WRITE_ENABLE_MASK
| reg
);
118 static u32
temac_dma_in32(struct temac_local
*lp
, int reg
)
120 return dcr_read(lp
->sdma_dcrs
, reg
);
123 static void temac_dma_out32(struct temac_local
*lp
, int reg
, u32 value
)
125 dcr_write(lp
->sdma_dcrs
, reg
, value
);
129 * temac_dma_bd_init - Setup buffer descriptor rings
131 static int temac_dma_bd_init(struct net_device
*ndev
)
133 struct temac_local
*lp
= netdev_priv(ndev
);
137 lp
->rx_skb
= kzalloc(sizeof(*lp
->rx_skb
) * RX_BD_NUM
, GFP_KERNEL
);
138 /* allocate the tx and rx ring buffer descriptors. */
139 /* returns a virtual addres and a physical address. */
140 lp
->tx_bd_v
= dma_alloc_coherent(ndev
->dev
.parent
,
141 sizeof(*lp
->tx_bd_v
) * TX_BD_NUM
,
142 &lp
->tx_bd_p
, GFP_KERNEL
);
143 lp
->rx_bd_v
= dma_alloc_coherent(ndev
->dev
.parent
,
144 sizeof(*lp
->rx_bd_v
) * RX_BD_NUM
,
145 &lp
->rx_bd_p
, GFP_KERNEL
);
147 memset(lp
->tx_bd_v
, 0, sizeof(*lp
->tx_bd_v
) * TX_BD_NUM
);
148 for (i
= 0; i
< TX_BD_NUM
; i
++) {
149 lp
->tx_bd_v
[i
].next
= lp
->tx_bd_p
+
150 sizeof(*lp
->tx_bd_v
) * ((i
+ 1) % TX_BD_NUM
);
153 memset(lp
->rx_bd_v
, 0, sizeof(*lp
->rx_bd_v
) * RX_BD_NUM
);
154 for (i
= 0; i
< RX_BD_NUM
; i
++) {
155 lp
->rx_bd_v
[i
].next
= lp
->rx_bd_p
+
156 sizeof(*lp
->rx_bd_v
) * ((i
+ 1) % RX_BD_NUM
);
158 skb
= alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
159 + XTE_ALIGN
, GFP_ATOMIC
);
161 dev_err(&ndev
->dev
, "alloc_skb error %d\n", i
);
165 skb_reserve(skb
, BUFFER_ALIGN(skb
->data
));
166 /* returns physical address of skb->data */
167 lp
->rx_bd_v
[i
].phys
= dma_map_single(ndev
->dev
.parent
,
169 XTE_MAX_JUMBO_FRAME_SIZE
,
171 lp
->rx_bd_v
[i
].len
= XTE_MAX_JUMBO_FRAME_SIZE
;
172 lp
->rx_bd_v
[i
].app0
= STS_CTRL_APP0_IRQONEND
;
175 temac_dma_out32(lp
, TX_CHNL_CTRL
, 0x10220400 |
177 CHNL_CTRL_IRQ_DLY_EN
|
178 CHNL_CTRL_IRQ_COAL_EN
);
181 temac_dma_out32(lp
, RX_CHNL_CTRL
, 0xff010000 |
183 CHNL_CTRL_IRQ_DLY_EN
|
184 CHNL_CTRL_IRQ_COAL_EN
|
188 temac_dma_out32(lp
, RX_CURDESC_PTR
, lp
->rx_bd_p
);
189 temac_dma_out32(lp
, RX_TAILDESC_PTR
,
190 lp
->rx_bd_p
+ (sizeof(*lp
->rx_bd_v
) * (RX_BD_NUM
- 1)));
191 temac_dma_out32(lp
, TX_CURDESC_PTR
, lp
->tx_bd_p
);
196 /* ---------------------------------------------------------------------
200 static int temac_set_mac_address(struct net_device
*ndev
, void *address
)
202 struct temac_local
*lp
= netdev_priv(ndev
);
205 memcpy(ndev
->dev_addr
, address
, ETH_ALEN
);
207 if (!is_valid_ether_addr(ndev
->dev_addr
))
208 random_ether_addr(ndev
->dev_addr
);
210 /* set up unicast MAC address filter set its mac address */
211 mutex_lock(&lp
->indirect_mutex
);
212 temac_indirect_out32(lp
, XTE_UAW0_OFFSET
,
213 (ndev
->dev_addr
[0]) |
214 (ndev
->dev_addr
[1] << 8) |
215 (ndev
->dev_addr
[2] << 16) |
216 (ndev
->dev_addr
[3] << 24));
217 /* There are reserved bits in EUAW1
218 * so don't affect them Set MAC bits [47:32] in EUAW1 */
219 temac_indirect_out32(lp
, XTE_UAW1_OFFSET
,
220 (ndev
->dev_addr
[4] & 0x000000ff) |
221 (ndev
->dev_addr
[5] << 8));
222 mutex_unlock(&lp
->indirect_mutex
);
227 static int netdev_set_mac_address(struct net_device
*ndev
, void *p
)
229 struct sockaddr
*addr
= p
;
231 return temac_set_mac_address(ndev
, addr
->sa_data
);
234 static void temac_set_multicast_list(struct net_device
*ndev
)
236 struct temac_local
*lp
= netdev_priv(ndev
);
237 u32 multi_addr_msw
, multi_addr_lsw
, val
;
240 mutex_lock(&lp
->indirect_mutex
);
241 if (ndev
->flags
& (IFF_ALLMULTI
| IFF_PROMISC
) ||
242 netdev_mc_count(ndev
) > MULTICAST_CAM_TABLE_NUM
) {
244 * We must make the kernel realise we had to move
245 * into promisc mode or we start all out war on
246 * the cable. If it was a promisc request the
247 * flag is already set. If not we assert it.
249 ndev
->flags
|= IFF_PROMISC
;
250 temac_indirect_out32(lp
, XTE_AFM_OFFSET
, XTE_AFM_EPPRM_MASK
);
251 dev_info(&ndev
->dev
, "Promiscuous mode enabled.\n");
252 } else if (!netdev_mc_empty(ndev
)) {
253 struct dev_mc_list
*mclist
;
256 netdev_for_each_mc_addr(mclist
, ndev
) {
257 if (i
>= MULTICAST_CAM_TABLE_NUM
)
259 multi_addr_msw
= ((mclist
->dmi_addr
[3] << 24) |
260 (mclist
->dmi_addr
[2] << 16) |
261 (mclist
->dmi_addr
[1] << 8) |
262 (mclist
->dmi_addr
[0]));
263 temac_indirect_out32(lp
, XTE_MAW0_OFFSET
,
265 multi_addr_lsw
= ((mclist
->dmi_addr
[5] << 8) |
266 (mclist
->dmi_addr
[4]) | (i
<< 16));
267 temac_indirect_out32(lp
, XTE_MAW1_OFFSET
,
272 val
= temac_indirect_in32(lp
, XTE_AFM_OFFSET
);
273 temac_indirect_out32(lp
, XTE_AFM_OFFSET
,
274 val
& ~XTE_AFM_EPPRM_MASK
);
275 temac_indirect_out32(lp
, XTE_MAW0_OFFSET
, 0);
276 temac_indirect_out32(lp
, XTE_MAW1_OFFSET
, 0);
277 dev_info(&ndev
->dev
, "Promiscuous mode disabled.\n");
279 mutex_unlock(&lp
->indirect_mutex
);
282 struct temac_option
{
288 } temac_options
[] = {
289 /* Turn on jumbo packet support for both Rx and Tx */
291 .opt
= XTE_OPTION_JUMBO
,
292 .reg
= XTE_TXC_OFFSET
,
293 .m_or
= XTE_TXC_TXJMBO_MASK
,
296 .opt
= XTE_OPTION_JUMBO
,
297 .reg
= XTE_RXC1_OFFSET
,
298 .m_or
=XTE_RXC1_RXJMBO_MASK
,
300 /* Turn on VLAN packet support for both Rx and Tx */
302 .opt
= XTE_OPTION_VLAN
,
303 .reg
= XTE_TXC_OFFSET
,
304 .m_or
=XTE_TXC_TXVLAN_MASK
,
307 .opt
= XTE_OPTION_VLAN
,
308 .reg
= XTE_RXC1_OFFSET
,
309 .m_or
=XTE_RXC1_RXVLAN_MASK
,
311 /* Turn on FCS stripping on receive packets */
313 .opt
= XTE_OPTION_FCS_STRIP
,
314 .reg
= XTE_RXC1_OFFSET
,
315 .m_or
=XTE_RXC1_RXFCS_MASK
,
317 /* Turn on FCS insertion on transmit packets */
319 .opt
= XTE_OPTION_FCS_INSERT
,
320 .reg
= XTE_TXC_OFFSET
,
321 .m_or
=XTE_TXC_TXFCS_MASK
,
323 /* Turn on length/type field checking on receive packets */
325 .opt
= XTE_OPTION_LENTYPE_ERR
,
326 .reg
= XTE_RXC1_OFFSET
,
327 .m_or
=XTE_RXC1_RXLT_MASK
,
329 /* Turn on flow control */
331 .opt
= XTE_OPTION_FLOW_CONTROL
,
332 .reg
= XTE_FCC_OFFSET
,
333 .m_or
=XTE_FCC_RXFLO_MASK
,
335 /* Turn on flow control */
337 .opt
= XTE_OPTION_FLOW_CONTROL
,
338 .reg
= XTE_FCC_OFFSET
,
339 .m_or
=XTE_FCC_TXFLO_MASK
,
341 /* Turn on promiscuous frame filtering (all frames are received ) */
343 .opt
= XTE_OPTION_PROMISC
,
344 .reg
= XTE_AFM_OFFSET
,
345 .m_or
=XTE_AFM_EPPRM_MASK
,
347 /* Enable transmitter if not already enabled */
349 .opt
= XTE_OPTION_TXEN
,
350 .reg
= XTE_TXC_OFFSET
,
351 .m_or
=XTE_TXC_TXEN_MASK
,
353 /* Enable receiver? */
355 .opt
= XTE_OPTION_RXEN
,
356 .reg
= XTE_RXC1_OFFSET
,
357 .m_or
=XTE_RXC1_RXEN_MASK
,
365 static u32
temac_setoptions(struct net_device
*ndev
, u32 options
)
367 struct temac_local
*lp
= netdev_priv(ndev
);
368 struct temac_option
*tp
= &temac_options
[0];
371 mutex_lock(&lp
->indirect_mutex
);
373 reg
= temac_indirect_in32(lp
, tp
->reg
) & ~tp
->m_or
;
374 if (options
& tp
->opt
)
376 temac_indirect_out32(lp
, tp
->reg
, reg
);
379 lp
->options
|= options
;
380 mutex_unlock(&lp
->indirect_mutex
);
385 /* Initilize temac */
386 static void temac_device_reset(struct net_device
*ndev
)
388 struct temac_local
*lp
= netdev_priv(ndev
);
392 /* Perform a software reset */
394 /* 0x300 host enable bit ? */
395 /* reset PHY through control register ?:1 */
397 dev_dbg(&ndev
->dev
, "%s()\n", __func__
);
399 mutex_lock(&lp
->indirect_mutex
);
400 /* Reset the receiver and wait for it to finish reset */
401 temac_indirect_out32(lp
, XTE_RXC1_OFFSET
, XTE_RXC1_RXRST_MASK
);
403 while (temac_indirect_in32(lp
, XTE_RXC1_OFFSET
) & XTE_RXC1_RXRST_MASK
) {
405 if (--timeout
== 0) {
407 "temac_device_reset RX reset timeout!!\n");
412 /* Reset the transmitter and wait for it to finish reset */
413 temac_indirect_out32(lp
, XTE_TXC_OFFSET
, XTE_TXC_TXRST_MASK
);
415 while (temac_indirect_in32(lp
, XTE_TXC_OFFSET
) & XTE_TXC_TXRST_MASK
) {
417 if (--timeout
== 0) {
419 "temac_device_reset TX reset timeout!!\n");
424 /* Disable the receiver */
425 val
= temac_indirect_in32(lp
, XTE_RXC1_OFFSET
);
426 temac_indirect_out32(lp
, XTE_RXC1_OFFSET
, val
& ~XTE_RXC1_RXEN_MASK
);
428 /* Reset Local Link (DMA) */
429 temac_dma_out32(lp
, DMA_CONTROL_REG
, DMA_CONTROL_RST
);
431 while (temac_dma_in32(lp
, DMA_CONTROL_REG
) & DMA_CONTROL_RST
) {
433 if (--timeout
== 0) {
435 "temac_device_reset DMA reset timeout!!\n");
439 temac_dma_out32(lp
, DMA_CONTROL_REG
, DMA_TAIL_ENABLE
);
441 temac_dma_bd_init(ndev
);
443 temac_indirect_out32(lp
, XTE_RXC0_OFFSET
, 0);
444 temac_indirect_out32(lp
, XTE_RXC1_OFFSET
, 0);
445 temac_indirect_out32(lp
, XTE_TXC_OFFSET
, 0);
446 temac_indirect_out32(lp
, XTE_FCC_OFFSET
, XTE_FCC_RXFLO_MASK
);
448 mutex_unlock(&lp
->indirect_mutex
);
450 /* Sync default options with HW
451 * but leave receiver and transmitter disabled. */
452 temac_setoptions(ndev
,
453 lp
->options
& ~(XTE_OPTION_TXEN
| XTE_OPTION_RXEN
));
455 temac_set_mac_address(ndev
, NULL
);
457 /* Set address filter table */
458 temac_set_multicast_list(ndev
);
459 if (temac_setoptions(ndev
, lp
->options
))
460 dev_err(&ndev
->dev
, "Error setting TEMAC options\n");
462 /* Init Driver variable */
463 ndev
->trans_start
= 0;
466 void temac_adjust_link(struct net_device
*ndev
)
468 struct temac_local
*lp
= netdev_priv(ndev
);
469 struct phy_device
*phy
= lp
->phy_dev
;
473 /* hash together the state values to decide if something has changed */
474 link_state
= phy
->speed
| (phy
->duplex
<< 1) | phy
->link
;
476 mutex_lock(&lp
->indirect_mutex
);
477 if (lp
->last_link
!= link_state
) {
478 mii_speed
= temac_indirect_in32(lp
, XTE_EMCFG_OFFSET
);
479 mii_speed
&= ~XTE_EMCFG_LINKSPD_MASK
;
481 switch (phy
->speed
) {
482 case SPEED_1000
: mii_speed
|= XTE_EMCFG_LINKSPD_1000
; break;
483 case SPEED_100
: mii_speed
|= XTE_EMCFG_LINKSPD_100
; break;
484 case SPEED_10
: mii_speed
|= XTE_EMCFG_LINKSPD_10
; break;
487 /* Write new speed setting out to TEMAC */
488 temac_indirect_out32(lp
, XTE_EMCFG_OFFSET
, mii_speed
);
489 lp
->last_link
= link_state
;
490 phy_print_status(phy
);
492 mutex_unlock(&lp
->indirect_mutex
);
495 static void temac_start_xmit_done(struct net_device
*ndev
)
497 struct temac_local
*lp
= netdev_priv(ndev
);
498 struct cdmac_bd
*cur_p
;
499 unsigned int stat
= 0;
501 cur_p
= &lp
->tx_bd_v
[lp
->tx_bd_ci
];
504 while (stat
& STS_CTRL_APP0_CMPLT
) {
505 dma_unmap_single(ndev
->dev
.parent
, cur_p
->phys
, cur_p
->len
,
508 dev_kfree_skb_irq((struct sk_buff
*)cur_p
->app4
);
511 ndev
->stats
.tx_packets
++;
512 ndev
->stats
.tx_bytes
+= cur_p
->len
;
515 if (lp
->tx_bd_ci
>= TX_BD_NUM
)
518 cur_p
= &lp
->tx_bd_v
[lp
->tx_bd_ci
];
522 netif_wake_queue(ndev
);
525 static int temac_start_xmit(struct sk_buff
*skb
, struct net_device
*ndev
)
527 struct temac_local
*lp
= netdev_priv(ndev
);
528 struct cdmac_bd
*cur_p
;
529 dma_addr_t start_p
, tail_p
;
531 unsigned long num_frag
;
534 num_frag
= skb_shinfo(skb
)->nr_frags
;
535 frag
= &skb_shinfo(skb
)->frags
[0];
536 start_p
= lp
->tx_bd_p
+ sizeof(*lp
->tx_bd_v
) * lp
->tx_bd_tail
;
537 cur_p
= &lp
->tx_bd_v
[lp
->tx_bd_tail
];
539 if (cur_p
->app0
& STS_CTRL_APP0_CMPLT
) {
540 if (!netif_queue_stopped(ndev
)) {
541 netif_stop_queue(ndev
);
542 return NETDEV_TX_BUSY
;
544 return NETDEV_TX_BUSY
;
548 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
549 const struct iphdr
*ip
= ip_hdr(skb
);
550 int length
= 0, start
= 0, insert
= 0;
552 switch (ip
->protocol
) {
554 start
= sizeof(struct iphdr
) + ETH_HLEN
;
555 insert
= sizeof(struct iphdr
) + ETH_HLEN
+ 16;
556 length
= ip
->tot_len
- sizeof(struct iphdr
);
559 start
= sizeof(struct iphdr
) + ETH_HLEN
;
560 insert
= sizeof(struct iphdr
) + ETH_HLEN
+ 6;
561 length
= ip
->tot_len
- sizeof(struct iphdr
);
566 cur_p
->app1
= ((start
<< 16) | insert
);
567 cur_p
->app2
= csum_tcpudp_magic(ip
->saddr
, ip
->daddr
,
568 length
, ip
->protocol
, 0);
569 skb
->data
[insert
] = 0;
570 skb
->data
[insert
+ 1] = 0;
572 cur_p
->app0
|= STS_CTRL_APP0_SOP
;
573 cur_p
->len
= skb_headlen(skb
);
574 cur_p
->phys
= dma_map_single(ndev
->dev
.parent
, skb
->data
, skb
->len
,
576 cur_p
->app4
= (unsigned long)skb
;
578 for (ii
= 0; ii
< num_frag
; ii
++) {
580 if (lp
->tx_bd_tail
>= TX_BD_NUM
)
583 cur_p
= &lp
->tx_bd_v
[lp
->tx_bd_tail
];
584 cur_p
->phys
= dma_map_single(ndev
->dev
.parent
,
585 (void *)page_address(frag
->page
) +
587 frag
->size
, DMA_TO_DEVICE
);
588 cur_p
->len
= frag
->size
;
592 cur_p
->app0
|= STS_CTRL_APP0_EOP
;
594 tail_p
= lp
->tx_bd_p
+ sizeof(*lp
->tx_bd_v
) * lp
->tx_bd_tail
;
596 if (lp
->tx_bd_tail
>= TX_BD_NUM
)
599 /* Kick off the transfer */
600 temac_dma_out32(lp
, TX_TAILDESC_PTR
, tail_p
); /* DMA start */
606 static void ll_temac_recv(struct net_device
*ndev
)
608 struct temac_local
*lp
= netdev_priv(ndev
);
609 struct sk_buff
*skb
, *new_skb
;
611 struct cdmac_bd
*cur_p
;
614 unsigned long skb_vaddr
;
617 spin_lock_irqsave(&lp
->rx_lock
, flags
);
619 tail_p
= lp
->rx_bd_p
+ sizeof(*lp
->rx_bd_v
) * lp
->rx_bd_ci
;
620 cur_p
= &lp
->rx_bd_v
[lp
->rx_bd_ci
];
622 bdstat
= cur_p
->app0
;
623 while ((bdstat
& STS_CTRL_APP0_CMPLT
)) {
625 skb
= lp
->rx_skb
[lp
->rx_bd_ci
];
626 length
= cur_p
->app4
& 0x3FFF;
628 skb_vaddr
= virt_to_bus(skb
->data
);
629 dma_unmap_single(ndev
->dev
.parent
, skb_vaddr
, length
,
632 skb_put(skb
, length
);
634 skb
->protocol
= eth_type_trans(skb
, ndev
);
635 skb
->ip_summed
= CHECKSUM_NONE
;
639 ndev
->stats
.rx_packets
++;
640 ndev
->stats
.rx_bytes
+= length
;
642 new_skb
= alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
+ XTE_ALIGN
,
645 dev_err(&ndev
->dev
, "no memory for new sk_buff\n");
646 spin_unlock_irqrestore(&lp
->rx_lock
, flags
);
650 skb_reserve(new_skb
, BUFFER_ALIGN(new_skb
->data
));
652 cur_p
->app0
= STS_CTRL_APP0_IRQONEND
;
653 cur_p
->phys
= dma_map_single(ndev
->dev
.parent
, new_skb
->data
,
654 XTE_MAX_JUMBO_FRAME_SIZE
,
656 cur_p
->len
= XTE_MAX_JUMBO_FRAME_SIZE
;
657 lp
->rx_skb
[lp
->rx_bd_ci
] = new_skb
;
660 if (lp
->rx_bd_ci
>= RX_BD_NUM
)
663 cur_p
= &lp
->rx_bd_v
[lp
->rx_bd_ci
];
664 bdstat
= cur_p
->app0
;
666 temac_dma_out32(lp
, RX_TAILDESC_PTR
, tail_p
);
668 spin_unlock_irqrestore(&lp
->rx_lock
, flags
);
671 static irqreturn_t
ll_temac_tx_irq(int irq
, void *_ndev
)
673 struct net_device
*ndev
= _ndev
;
674 struct temac_local
*lp
= netdev_priv(ndev
);
677 status
= temac_dma_in32(lp
, TX_IRQ_REG
);
678 temac_dma_out32(lp
, TX_IRQ_REG
, status
);
680 if (status
& (IRQ_COAL
| IRQ_DLY
))
681 temac_start_xmit_done(lp
->ndev
);
683 dev_err(&ndev
->dev
, "DMA error 0x%x\n", status
);
688 static irqreturn_t
ll_temac_rx_irq(int irq
, void *_ndev
)
690 struct net_device
*ndev
= _ndev
;
691 struct temac_local
*lp
= netdev_priv(ndev
);
694 /* Read and clear the status registers */
695 status
= temac_dma_in32(lp
, RX_IRQ_REG
);
696 temac_dma_out32(lp
, RX_IRQ_REG
, status
);
698 if (status
& (IRQ_COAL
| IRQ_DLY
))
699 ll_temac_recv(lp
->ndev
);
704 static int temac_open(struct net_device
*ndev
)
706 struct temac_local
*lp
= netdev_priv(ndev
);
709 dev_dbg(&ndev
->dev
, "temac_open()\n");
712 lp
->phy_dev
= of_phy_connect(lp
->ndev
, lp
->phy_node
,
713 temac_adjust_link
, 0, 0);
715 dev_err(lp
->dev
, "of_phy_connect() failed\n");
719 phy_start(lp
->phy_dev
);
722 rc
= request_irq(lp
->tx_irq
, ll_temac_tx_irq
, 0, ndev
->name
, ndev
);
725 rc
= request_irq(lp
->rx_irq
, ll_temac_rx_irq
, 0, ndev
->name
, ndev
);
729 temac_device_reset(ndev
);
733 free_irq(lp
->tx_irq
, ndev
);
736 phy_disconnect(lp
->phy_dev
);
738 dev_err(lp
->dev
, "request_irq() failed\n");
742 static int temac_stop(struct net_device
*ndev
)
744 struct temac_local
*lp
= netdev_priv(ndev
);
746 dev_dbg(&ndev
->dev
, "temac_close()\n");
748 free_irq(lp
->tx_irq
, ndev
);
749 free_irq(lp
->rx_irq
, ndev
);
752 phy_disconnect(lp
->phy_dev
);
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * ndo_poll_controller: service both DMA channels with interrupts
 * masked (netconsole/netpoll path).
 *
 * Fix: the handlers cast their void* cookie back to struct
 * net_device *, but the original passed the private struct (lp),
 * so netdev_priv() was applied to garbage; it also paired each
 * handler with the other channel's irq number.  Pass ndev and the
 * matching irq numbers.
 */
static void
temac_poll_controller(struct net_device *ndev)
{
	struct temac_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);

	ll_temac_rx_irq(lp->rx_irq, ndev);
	ll_temac_tx_irq(lp->tx_irq, ndev);

	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif
775 static const struct net_device_ops temac_netdev_ops
= {
776 .ndo_open
= temac_open
,
777 .ndo_stop
= temac_stop
,
778 .ndo_start_xmit
= temac_start_xmit
,
779 .ndo_set_mac_address
= netdev_set_mac_address
,
780 //.ndo_set_multicast_list = temac_set_multicast_list,
781 #ifdef CONFIG_NET_POLL_CONTROLLER
782 .ndo_poll_controller
= temac_poll_controller
,
786 /* ---------------------------------------------------------------------
787 * SYSFS device attributes
789 static ssize_t
temac_show_llink_regs(struct device
*dev
,
790 struct device_attribute
*attr
, char *buf
)
792 struct net_device
*ndev
= dev_get_drvdata(dev
);
793 struct temac_local
*lp
= netdev_priv(ndev
);
796 for (i
= 0; i
< 0x11; i
++)
797 len
+= sprintf(buf
+ len
, "%.8x%s", temac_dma_in32(lp
, i
),
798 (i
% 8) == 7 ? "\n" : " ");
799 len
+= sprintf(buf
+ len
, "\n");
804 static DEVICE_ATTR(llink_regs
, 0440, temac_show_llink_regs
, NULL
);
806 static struct attribute
*temac_device_attrs
[] = {
807 &dev_attr_llink_regs
.attr
,
811 static const struct attribute_group temac_attr_group
= {
812 .attrs
= temac_device_attrs
,
816 temac_of_probe(struct of_device
*op
, const struct of_device_id
*match
)
818 struct device_node
*np
;
819 struct temac_local
*lp
;
820 struct net_device
*ndev
;
825 /* Init network device structure */
826 ndev
= alloc_etherdev(sizeof(*lp
));
828 dev_err(&op
->dev
, "could not allocate device.\n");
832 dev_set_drvdata(&op
->dev
, ndev
);
833 SET_NETDEV_DEV(ndev
, &op
->dev
);
834 ndev
->flags
&= ~IFF_MULTICAST
; /* clear multicast */
835 ndev
->features
= NETIF_F_SG
| NETIF_F_FRAGLIST
;
836 ndev
->netdev_ops
= &temac_netdev_ops
;
838 ndev
->features
|= NETIF_F_IP_CSUM
; /* Can checksum TCP/UDP over IPv4. */
839 ndev
->features
|= NETIF_F_HW_CSUM
; /* Can checksum all the packets. */
840 ndev
->features
|= NETIF_F_IPV6_CSUM
; /* Can checksum IPV6 TCP/UDP */
841 ndev
->features
|= NETIF_F_HIGHDMA
; /* Can DMA to high memory. */
842 ndev
->features
|= NETIF_F_HW_VLAN_TX
; /* Transmit VLAN hw accel */
843 ndev
->features
|= NETIF_F_HW_VLAN_RX
; /* Receive VLAN hw acceleration */
844 ndev
->features
|= NETIF_F_HW_VLAN_FILTER
; /* Receive VLAN filtering */
845 ndev
->features
|= NETIF_F_VLAN_CHALLENGED
; /* cannot handle VLAN pkts */
846 ndev
->features
|= NETIF_F_GSO
; /* Enable software GSO. */
847 ndev
->features
|= NETIF_F_MULTI_QUEUE
; /* Has multiple TX/RX queues */
848 ndev
->features
|= NETIF_F_LRO
; /* large receive offload */
851 /* setup temac private info structure */
852 lp
= netdev_priv(ndev
);
855 lp
->options
= XTE_OPTION_DEFAULTS
;
856 spin_lock_init(&lp
->rx_lock
);
857 mutex_init(&lp
->indirect_mutex
);
859 /* map device registers */
860 lp
->regs
= of_iomap(op
->node
, 0);
862 dev_err(&op
->dev
, "could not map temac regs.\n");
866 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
867 np
= of_parse_phandle(op
->node
, "llink-connected", 0);
869 dev_err(&op
->dev
, "could not find DMA node\n");
873 dcrs
= dcr_resource_start(np
, 0);
875 dev_err(&op
->dev
, "could not get DMA register address\n");
878 lp
->sdma_dcrs
= dcr_map(np
, dcrs
, dcr_resource_len(np
, 0));
879 dev_dbg(&op
->dev
, "DCR base: %x\n", dcrs
);
881 lp
->rx_irq
= irq_of_parse_and_map(np
, 0);
882 lp
->tx_irq
= irq_of_parse_and_map(np
, 1);
883 if (!lp
->rx_irq
|| !lp
->tx_irq
) {
884 dev_err(&op
->dev
, "could not determine irqs\n");
889 of_node_put(np
); /* Finished with the DMA node; drop the reference */
891 /* Retrieve the MAC address */
892 addr
= of_get_property(op
->node
, "local-mac-address", &size
);
893 if ((!addr
) || (size
!= 6)) {
894 dev_err(&op
->dev
, "could not find MAC address\n");
898 temac_set_mac_address(ndev
, (void *)addr
);
900 rc
= temac_mdio_setup(lp
, op
->node
);
902 dev_warn(&op
->dev
, "error registering MDIO bus\n");
904 lp
->phy_node
= of_parse_phandle(op
->node
, "phy-handle", 0);
906 dev_dbg(lp
->dev
, "using PHY node %s (%p)\n", np
->full_name
, np
);
908 /* Add the device attributes */
909 rc
= sysfs_create_group(&lp
->dev
->kobj
, &temac_attr_group
);
911 dev_err(lp
->dev
, "Error creating sysfs files\n");
915 rc
= register_netdev(lp
->ndev
);
917 dev_err(lp
->dev
, "register_netdev() error (%i)\n", rc
);
918 goto err_register_ndev
;
924 sysfs_remove_group(&lp
->dev
->kobj
, &temac_attr_group
);
931 static int __devexit
temac_of_remove(struct of_device
*op
)
933 struct net_device
*ndev
= dev_get_drvdata(&op
->dev
);
934 struct temac_local
*lp
= netdev_priv(ndev
);
936 temac_mdio_teardown(lp
);
937 unregister_netdev(ndev
);
938 sysfs_remove_group(&lp
->dev
->kobj
, &temac_attr_group
);
940 of_node_put(lp
->phy_node
);
942 dev_set_drvdata(&op
->dev
, NULL
);
947 static struct of_device_id temac_of_match
[] __devinitdata
= {
948 { .compatible
= "xlnx,xps-ll-temac-1.01.b", },
949 { .compatible
= "xlnx,xps-ll-temac-2.00.a", },
950 { .compatible
= "xlnx,xps-ll-temac-2.02.a", },
951 { .compatible
= "xlnx,xps-ll-temac-2.03.a", },
954 MODULE_DEVICE_TABLE(of
, temac_of_match
);
956 static struct of_platform_driver temac_of_driver
= {
957 .match_table
= temac_of_match
,
958 .probe
= temac_of_probe
,
959 .remove
= __devexit_p(temac_of_remove
),
961 .owner
= THIS_MODULE
,
962 .name
= "xilinx_temac",
966 static int __init
temac_init(void)
968 return of_register_platform_driver(&temac_of_driver
);
970 module_init(temac_init
);
972 static void __exit
temac_exit(void)
974 of_unregister_platform_driver(&temac_of_driver
);
976 module_exit(temac_exit
);
978 MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
979 MODULE_AUTHOR("Yoshio Kashiwagi");
980 MODULE_LICENSE("GPL");