// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017, National Instruments Corp.
 *
 * Author: Moritz Fischer <mdf@kernel.org>
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/nvmem-consumer.h>
#include <linux/ethtool.h>
#include <linux/iopoll.h>
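
/* Tx/Rx descriptor ring sizes (counts assumed; missing from this copy) */
#define TX_BD_NUM		64
#define RX_BD_NUM		128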

/* Axi DMA Register definitions */
#define XAXIDMA_TX_CR_OFFSET	0x00 /* Channel control */
#define XAXIDMA_TX_SR_OFFSET	0x04 /* Status */
#define XAXIDMA_TX_CDESC_OFFSET	0x08 /* Current descriptor pointer */
#define XAXIDMA_TX_TDESC_OFFSET	0x10 /* Tail descriptor pointer */

#define XAXIDMA_RX_CR_OFFSET	0x30 /* Channel control */
#define XAXIDMA_RX_SR_OFFSET	0x34 /* Status */
#define XAXIDMA_RX_CDESC_OFFSET	0x38 /* Current descriptor pointer */
#define XAXIDMA_RX_TDESC_OFFSET	0x40 /* Tail descriptor pointer */

#define XAXIDMA_CR_RUNSTOP_MASK	0x1 /* Start/stop DMA channel */
#define XAXIDMA_CR_RESET_MASK	0x4 /* Reset DMA engine */

#define XAXIDMA_BD_CTRL_LENGTH_MASK	0x007FFFFF /* Requested len */
#define XAXIDMA_BD_CTRL_TXSOF_MASK	0x08000000 /* First tx packet */
#define XAXIDMA_BD_CTRL_TXEOF_MASK	0x04000000 /* Last tx packet */
#define XAXIDMA_BD_CTRL_ALL_MASK	0x0C000000 /* All control bits */

#define XAXIDMA_DELAY_MASK		0xFF000000 /* Delay timeout counter */
#define XAXIDMA_COALESCE_MASK		0x00FF0000 /* Coalesce counter */

#define XAXIDMA_DELAY_SHIFT		24
#define XAXIDMA_COALESCE_SHIFT		16
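
/* In the Tx/Rx channel control registers the interrupt coalesce count
 * occupies bits 16-23 and the delay timeout bits 24-31, per the masks and
 * shifts above; both fields are programmed in nixge_hw_dma_bd_init() and
 * nixge_dma_err_handler().
 */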

#define XAXIDMA_IRQ_IOC_MASK		0x00001000 /* Completion intr */
#define XAXIDMA_IRQ_DELAY_MASK		0x00002000 /* Delay interrupt */
#define XAXIDMA_IRQ_ERROR_MASK		0x00004000 /* Error interrupt */
#define XAXIDMA_IRQ_ALL_MASK		0x00007000 /* All interrupts */

/* Default TX/RX Threshold and waitbound values for SGDMA mode */
#define XAXIDMA_DFT_TX_THRESHOLD	24
#define XAXIDMA_DFT_TX_WAITBOUND	254
#define XAXIDMA_DFT_RX_THRESHOLD	24
#define XAXIDMA_DFT_RX_WAITBOUND	254

#define XAXIDMA_BD_STS_ACTUAL_LEN_MASK	0x007FFFFF /* Actual len */
#define XAXIDMA_BD_STS_COMPLETE_MASK	0x80000000 /* Completed */
#define XAXIDMA_BD_STS_DEC_ERR_MASK	0x40000000 /* Decode error */
#define XAXIDMA_BD_STS_SLV_ERR_MASK	0x20000000 /* Slave error */
#define XAXIDMA_BD_STS_INT_ERR_MASK	0x10000000 /* Internal err */
#define XAXIDMA_BD_STS_ALL_ERR_MASK	0x70000000 /* All errors */
#define XAXIDMA_BD_STS_RXSOF_MASK	0x08000000 /* First rx pkt */
#define XAXIDMA_BD_STS_RXEOF_MASK	0x04000000 /* Last rx pkt */
#define XAXIDMA_BD_STS_ALL_MASK		0xFC000000 /* All status bits */
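
/* The DMA engine writes these bits back into each descriptor's status
 * word; the driver reaps completed Tx/Rx descriptors by checking
 * XAXIDMA_BD_STS_COMPLETE_MASK.
 */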

#define NIXGE_REG_CTRL_OFFSET	0x4000
#define NIXGE_REG_INFO		0x00
#define NIXGE_REG_MAC_CTL	0x04
#define NIXGE_REG_PHY_CTL	0x08
#define NIXGE_REG_LED_CTL	0x0c
#define NIXGE_REG_MDIO_DATA	0x10
#define NIXGE_REG_MDIO_ADDR	0x14
#define NIXGE_REG_MDIO_OP	0x18
#define NIXGE_REG_MDIO_CTRL	0x1c

#define NIXGE_ID_LED_CTL_EN	BIT(0)
#define NIXGE_ID_LED_CTL_VAL	BIT(1)

#define NIXGE_MDIO_CLAUSE45	BIT(12)
#define NIXGE_MDIO_CLAUSE22	0
#define NIXGE_MDIO_OP(n)	(((n) & 0x3) << 10)
#define NIXGE_MDIO_OP_ADDRESS	0
#define NIXGE_MDIO_C45_WRITE	BIT(0)
#define NIXGE_MDIO_C45_READ	(BIT(1) | BIT(0))
#define NIXGE_MDIO_C22_WRITE	BIT(0)
#define NIXGE_MDIO_C22_READ	BIT(1)
#define NIXGE_MDIO_ADDR(n)	(((n) & 0x1f) << 5)
#define NIXGE_MDIO_MMD(n)	(((n) & 0x1f) << 0)

#define NIXGE_REG_MAC_LSB	0x1000
#define NIXGE_REG_MAC_MSB	0x1004

/* Packet size info */
#define NIXGE_HDR_SIZE		14 /* Size of Ethernet header */
#define NIXGE_TRL_SIZE		4 /* Size of Ethernet trailer (FCS) */
#define NIXGE_MTU		1500 /* Max MTU of an Ethernet frame */
#define NIXGE_JUMBO_MTU		9000 /* Max MTU of a jumbo Eth. frame */

#define NIXGE_MAX_FRAME_SIZE	(NIXGE_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
#define NIXGE_MAX_JUMBO_FRAME_SIZE \
	(NIXGE_JUMBO_MTU + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE)
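
/* IP core revisions referenced by the device tree match table below */
enum nixge_version {
	NIXGE_V2,
	NIXGE_V3,
};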

/* Hardware buffer descriptor (field layout assumed); DMA addresses are
 * stored as lo/hi word pairs.
 */
struct nixge_hw_dma_bd {
	u32 next_lo;
	u32 next_hi;
	u32 phys_lo;
	u32 phys_hi;
	u32 reserved3;
	u32 reserved4;
	u32 cntrl;
	u32 status;
	u32 app0;
	u32 app1;
	u32 app2;
	u32 app3;
	u32 app4;
	u32 sw_id_offset_lo;
	u32 sw_id_offset_hi;
	u32 reserved6;
};

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
	do { \
		(bd)->field##_lo = lower_32_bits((addr)); \
		(bd)->field##_hi = upper_32_bits((addr)); \
	} while (0)
#else
#define nixge_hw_dma_bd_set_addr(bd, field, addr) \
	((bd)->field##_lo = lower_32_bits((addr)))
#endif

#define nixge_hw_dma_bd_set_phys(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), phys, (addr))

#define nixge_hw_dma_bd_set_next(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), next, (addr))

#define nixge_hw_dma_bd_set_offset(bd, addr) \
	nixge_hw_dma_bd_set_addr((bd), sw_id_offset, (addr))

#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define nixge_hw_dma_bd_get_addr(bd, field) \
	(dma_addr_t)((((u64)(bd)->field##_hi) << 32) | ((bd)->field##_lo))
#else
#define nixge_hw_dma_bd_get_addr(bd, field) \
	(dma_addr_t)((bd)->field##_lo)
#endif
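
/* Descriptors carry DMA addresses as a lo/hi pair so the same layout works
 * with both 32-bit and 64-bit dma_addr_t; without CONFIG_PHYS_ADDR_T_64BIT
 * only the low word is written and read back.
 */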

struct nixge_tx_skb {
	struct sk_buff *skb;
	dma_addr_t mapping;
	size_t size;
	bool mapped_as_page;
};

struct nixge_priv {
	struct net_device *ndev;
	struct napi_struct napi;
	struct device *dev;

	/* Connection to PHY device */
	struct device_node *phy_node;
	phy_interface_t phy_mode;

	int link;
	unsigned int speed;
	unsigned int duplex;

	struct mii_bus *mii_bus;	/* MII bus reference */

	/* IO registers, dma functions and IRQs */
	void __iomem *ctrl_regs;
	void __iomem *dma_regs;

	struct tasklet_struct dma_err_tasklet;

	int tx_irq;
	int rx_irq;

	/* Buffer descriptors */
	struct nixge_hw_dma_bd *tx_bd_v;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tx_bd_p;

	struct nixge_hw_dma_bd *rx_bd_v;
	dma_addr_t rx_bd_p;
	u32 tx_bd_ci;
	u32 tx_bd_tail;
	u32 rx_bd_ci;

	u32 coalesce_count_rx;
	u32 coalesce_count_tx;
};

static void nixge_dma_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->dma_regs + offset);
}

static void nixge_dma_write_desc_reg(struct nixge_priv *priv, off_t offset,
				     dma_addr_t addr)
{
	writel(lower_32_bits(addr), priv->dma_regs + offset);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel(upper_32_bits(addr), priv->dma_regs + offset + 4);
#endif
}

static u32 nixge_dma_read_reg(const struct nixge_priv *priv, off_t offset)
{
	return readl(priv->dma_regs + offset);
}

static void nixge_ctrl_write_reg(struct nixge_priv *priv, off_t offset, u32 val)
{
	writel(val, priv->ctrl_regs + offset);
}

static u32 nixge_ctrl_read_reg(struct nixge_priv *priv, off_t offset)
{
	return readl(priv->ctrl_regs + offset);
}

#define nixge_ctrl_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->ctrl_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

#define nixge_dma_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout((priv)->dma_regs + (addr), (val), (cond), \
			   (sleep_us), (timeout_us))

static void nixge_hw_dma_bd_release(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	dma_addr_t phys_addr;
	struct sk_buff *skb;
	int i;

	if (priv->rx_bd_v) {
		for (i = 0; i < RX_BD_NUM; i++) {
			phys_addr = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
							     phys);

			dma_unmap_single(ndev->dev.parent, phys_addr,
					 NIXGE_MAX_JUMBO_FRAME_SIZE,
					 DMA_FROM_DEVICE);

			skb = (struct sk_buff *)(uintptr_t)
				nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[i],
							 sw_id_offset);
			dev_kfree_skb(skb);
		}

		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->rx_bd_v) * RX_BD_NUM,
				  priv->rx_bd_v,
				  priv->rx_bd_p);
	}

	if (priv->tx_skb)
		devm_kfree(ndev->dev.parent, priv->tx_skb);

	if (priv->tx_bd_v)
		dma_free_coherent(ndev->dev.parent,
				  sizeof(*priv->tx_bd_v) * TX_BD_NUM,
				  priv->tx_bd_v,
				  priv->tx_bd_p);
}

static int nixge_hw_dma_bd_init(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	dma_addr_t phys;
	u32 cr;
	int i;

	/* Reset the indexes which are used for accessing the BDs */
	priv->tx_bd_ci = 0;
	priv->tx_bd_tail = 0;
	priv->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	priv->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(*priv->tx_bd_v) * TX_BD_NUM,
					   &priv->tx_bd_p, GFP_KERNEL);
	if (!priv->tx_bd_v)
		goto out;

	priv->tx_skb = devm_kcalloc(ndev->dev.parent,
				    TX_BD_NUM, sizeof(*priv->tx_skb),
				    GFP_KERNEL);
	if (!priv->tx_skb)
		goto out;

	priv->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
					   sizeof(*priv->rx_bd_v) * RX_BD_NUM,
					   &priv->rx_bd_p, GFP_KERNEL);
	if (!priv->rx_bd_v)
		goto out;

	for (i = 0; i < TX_BD_NUM; i++) {
		nixge_hw_dma_bd_set_next(&priv->tx_bd_v[i],
					 priv->tx_bd_p +
					 sizeof(*priv->tx_bd_v) *
					 ((i + 1) % TX_BD_NUM));
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		nixge_hw_dma_bd_set_next(&priv->rx_bd_v[i],
					 priv->rx_bd_p
					 + sizeof(*priv->rx_bd_v) *
					 ((i + 1) % RX_BD_NUM));

		skb = __netdev_alloc_skb_ip_align(ndev,
						  NIXGE_MAX_JUMBO_FRAME_SIZE,
						  GFP_KERNEL);
		if (!skb)
			goto out;

		nixge_hw_dma_bd_set_offset(&priv->rx_bd_v[i], (uintptr_t)skb);
		phys = dma_map_single(ndev->dev.parent, skb->data,
				      NIXGE_MAX_JUMBO_FRAME_SIZE,
				      DMA_FROM_DEVICE);

		nixge_hw_dma_bd_set_phys(&priv->rx_bd_v[i], phys);

		priv->rx_bd_v[i].cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
	}

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      ((priv->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Rx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      ((priv->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Write to the Tx channel control register */
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_desc_reg(priv, XAXIDMA_RX_CDESC_OFFSET, priv->rx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, priv->rx_bd_p +
				 (sizeof(*priv->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	nixge_dma_write_desc_reg(priv, XAXIDMA_TX_CDESC_OFFSET, priv->tx_bd_p);
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);

	return 0;
out:
	nixge_hw_dma_bd_release(ndev);
	return -ENOMEM;
}

static void __nixge_device_reset(struct nixge_priv *priv, off_t offset)
{
	u32 status;
	int err;

	/* Reset Axi DMA. This would reset NIXGE Ethernet core as well.
	 * The reset process of Axi DMA takes a while to complete as all
	 * pending commands/transfers will be flushed or completed during
	 * this reset process.
	 */
	nixge_dma_write_reg(priv, offset, XAXIDMA_CR_RESET_MASK);
	err = nixge_dma_poll_timeout(priv, offset, status,
				     !(status & XAXIDMA_CR_RESET_MASK), 10,
				     1000);
	if (err)
		netdev_err(priv->ndev, "%s: DMA reset timeout!\n", __func__);
}

static void nixge_device_reset(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	__nixge_device_reset(priv, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(priv, XAXIDMA_RX_CR_OFFSET);

	if (nixge_hw_dma_bd_init(ndev))
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);

	netif_trans_update(ndev);
}

static void nixge_handle_link_change(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link != priv->link || phydev->speed != priv->speed ||
	    phydev->duplex != priv->duplex) {
		priv->link = phydev->link;
		priv->speed = phydev->speed;
		priv->duplex = phydev->duplex;
		phy_print_status(phydev);
	}
}

static void nixge_tx_skb_unmap(struct nixge_priv *priv,
			       struct nixge_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(priv->ndev->dev.parent, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->ndev->dev.parent,
					 tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void nixge_start_xmit_done(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	unsigned int status = 0;
	u32 packets = 0;
	u32 size = 0;

	cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
	tx_skb = &priv->tx_skb[priv->tx_bd_ci];

	status = cur_p->status;

	while (status & XAXIDMA_BD_STS_COMPLETE_MASK) {
		nixge_tx_skb_unmap(priv, tx_skb);
		cur_p->status = 0;

		size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		packets++;

		++priv->tx_bd_ci;
		priv->tx_bd_ci %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_ci];
		tx_skb = &priv->tx_skb[priv->tx_bd_ci];
		status = cur_p->status;
	}

	ndev->stats.tx_packets += packets;
	ndev->stats.tx_bytes += size;

	if (packets)
		netif_wake_queue(ndev);
}

static int nixge_check_tx_bd_space(struct nixge_priv *priv,
				   int num_frag)
{
	struct nixge_hw_dma_bd *cur_p;

	cur_p = &priv->tx_bd_v[(priv->tx_bd_tail + num_frag) % TX_BD_NUM];
	if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
		return NETDEV_TX_BUSY;
	return 0;
}
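
/* A frame consumes one descriptor for its linear part plus one per page
 * fragment; nixge_check_tx_bd_space() verifies that the slot num_frag
 * entries past the current tail is still free before a frame is queued.
 */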
static netdev_tx_t nixge_start_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	dma_addr_t tail_p, cur_phys;
	skb_frag_t *frag;
	u32 num_frag;
	u32 ii;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
	tx_skb = &priv->tx_skb[priv->tx_bd_tail];

	if (nixge_check_tx_bd_space(priv, num_frag)) {
		if (!netif_queue_stopped(ndev))
			netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	cur_phys = dma_map_single(ndev->dev.parent, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(ndev->dev.parent, cur_phys))
		goto drop;
	nixge_hw_dma_bd_set_phys(cur_p, cur_phys);

	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	tx_skb->skb = NULL;
	tx_skb->mapping = cur_phys;
	tx_skb->size = skb_headlen(skb);
	tx_skb->mapped_as_page = false;

	for (ii = 0; ii < num_frag; ii++) {
		++priv->tx_bd_tail;
		priv->tx_bd_tail %= TX_BD_NUM;
		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		frag = &skb_shinfo(skb)->frags[ii];

		cur_phys = skb_frag_dma_map(ndev->dev.parent, frag, 0,
					    skb_frag_size(frag),
					    DMA_TO_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_phys))
			goto frag_err;
		nixge_hw_dma_bd_set_phys(cur_p, cur_phys);

		cur_p->cntrl = skb_frag_size(frag);

		tx_skb->skb = NULL;
		tx_skb->mapping = cur_phys;
		tx_skb->size = skb_frag_size(frag);
		tx_skb->mapped_as_page = true;
	}

	/* last buffer of the frame */
	tx_skb->skb = skb;

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;

	tail_p = priv->tx_bd_p + sizeof(*priv->tx_bd_v) * priv->tx_bd_tail;
	/* Start the transfer */
	nixge_dma_write_desc_reg(priv, XAXIDMA_TX_TDESC_OFFSET, tail_p);
	++priv->tx_bd_tail;
	priv->tx_bd_tail %= TX_BD_NUM;

	return NETDEV_TX_OK;
frag_err:
	for (; ii > 0; ii--) {
		if (priv->tx_bd_tail)
			priv->tx_bd_tail--;
		else
			priv->tx_bd_tail = TX_BD_NUM - 1;

		tx_skb = &priv->tx_skb[priv->tx_bd_tail];
		nixge_tx_skb_unmap(priv, tx_skb);

		cur_p = &priv->tx_bd_v[priv->tx_bd_tail];
		cur_p->status = 0;
	}
	dma_unmap_single(priv->ndev->dev.parent,
			 tx_skb->mapping,
			 tx_skb->size, DMA_TO_DEVICE);
drop:
	ndev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static int nixge_recv(struct net_device *ndev, int budget)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb, *new_skb;
	struct nixge_hw_dma_bd *cur_p;
	dma_addr_t tail_p = 0, cur_phys = 0;
	u32 packets = 0;
	u32 length = 0;
	u32 size = 0;

	cur_p = &priv->rx_bd_v[priv->rx_bd_ci];

	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK &&
		budget > packets)) {
		tail_p = priv->rx_bd_p + sizeof(*priv->rx_bd_v) *
			 priv->rx_bd_ci;

		skb = (struct sk_buff *)(uintptr_t)
			nixge_hw_dma_bd_get_addr(cur_p, sw_id_offset);

		length = cur_p->status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
		if (length > NIXGE_MAX_JUMBO_FRAME_SIZE)
			length = NIXGE_MAX_JUMBO_FRAME_SIZE;

		dma_unmap_single(ndev->dev.parent,
				 nixge_hw_dma_bd_get_addr(cur_p, phys),
				 NIXGE_MAX_JUMBO_FRAME_SIZE,
				 DMA_FROM_DEVICE);

		skb_put(skb, length);

		skb->protocol = eth_type_trans(skb, ndev);
		skb_checksum_none_assert(skb);

		/* For now mark them as CHECKSUM_NONE since
		 * we don't have offload capabilities
		 */
		skb->ip_summed = CHECKSUM_NONE;

		napi_gro_receive(&priv->napi, skb);

		size += length;
		packets++;

		new_skb = netdev_alloc_skb_ip_align(ndev,
						    NIXGE_MAX_JUMBO_FRAME_SIZE);
		if (!new_skb)
			return packets;

		cur_phys = dma_map_single(ndev->dev.parent, new_skb->data,
					  NIXGE_MAX_JUMBO_FRAME_SIZE,
					  DMA_FROM_DEVICE);
		if (dma_mapping_error(ndev->dev.parent, cur_phys)) {
			/* FIXME: bail out and clean up */
			netdev_err(ndev, "Failed to map ...\n");
		}
		nixge_hw_dma_bd_set_phys(cur_p, cur_phys);
		cur_p->cntrl = NIXGE_MAX_JUMBO_FRAME_SIZE;
		cur_p->status = 0;
		nixge_hw_dma_bd_set_offset(cur_p, (uintptr_t)new_skb);

		++priv->rx_bd_ci;
		priv->rx_bd_ci %= RX_BD_NUM;
		cur_p = &priv->rx_bd_v[priv->rx_bd_ci];
	}

	ndev->stats.rx_packets += packets;
	ndev->stats.rx_bytes += size;

	if (tail_p)
		nixge_dma_write_desc_reg(priv, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	return packets;
}

static int nixge_poll(struct napi_struct *napi, int budget)
{
	struct nixge_priv *priv = container_of(napi, struct nixge_priv, napi);
	int work_done;
	u32 status, cr;

	work_done = 0;

	work_done = nixge_recv(priv->ndev, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);

		if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
			/* If there's more, reschedule, but clear */
			nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
			napi_schedule(napi);
		} else {
			/* if not, turn on RX IRQs again ... */
			cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
			cr |= (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
			nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);
		}
	}

	return work_done;
}

static irqreturn_t nixge_tx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	dma_addr_t phys;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_TX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
		nixge_start_xmit_done(priv->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Tx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		phys = nixge_hw_dma_bd_get_addr(&priv->tx_bd_v[priv->tx_bd_ci],
						phys);

		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_TX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static irqreturn_t nixge_rx_irq(int irq, void *_ndev)
{
	struct nixge_priv *priv = netdev_priv(_ndev);
	struct net_device *ndev = _ndev;
	unsigned int status;
	dma_addr_t phys;
	u32 cr;

	status = nixge_dma_read_reg(priv, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		/* Turn off IRQs because NAPI */
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		napi_schedule(&priv->napi);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK)) {
		netdev_err(ndev, "No interrupts asserted in Rx path\n");
		return IRQ_NONE;
	}
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		phys = nixge_hw_dma_bd_get_addr(&priv->rx_bd_v[priv->rx_bd_ci],
						phys);
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%llx\n", (u64)phys);

		cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET, cr);

		cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&priv->dma_err_tasklet);
		nixge_dma_write_reg(priv, XAXIDMA_RX_SR_OFFSET, status);
	}
out:
	return IRQ_HANDLED;
}

static void nixge_dma_err_handler(struct tasklet_struct *t)
{
	struct nixge_priv *lp = from_tasklet(lp, t, dma_err_tasklet);
	struct nixge_hw_dma_bd *cur_p;
	struct nixge_tx_skb *tx_skb;
	u32 cr, i;

	__nixge_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
	__nixge_device_reset(lp, XAXIDMA_RX_CR_OFFSET);

	for (i = 0; i < TX_BD_NUM; i++) {
		cur_p = &lp->tx_bd_v[i];
		tx_skb = &lp->tx_skb[i];
		nixge_tx_skb_unmap(lp, tx_skb);

		nixge_hw_dma_bd_set_phys(cur_p, 0);
		cur_p->cntrl = 0;
		cur_p->status = 0;
		nixge_hw_dma_bd_set_offset(cur_p, 0);
	}

	for (i = 0; i < RX_BD_NUM; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Start updating the Rx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = ((cr & ~XAXIDMA_COALESCE_MASK) |
	      (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = ((cr & ~XAXIDMA_DELAY_MASK) |
	      (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Rx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET, cr);

	/* Start updating the Tx channel control register */
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	/* Update the interrupt coalesce count */
	cr = (((cr & ~XAXIDMA_COALESCE_MASK)) |
	      (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT));
	/* Update the delay timer count */
	cr = (((cr & ~XAXIDMA_DELAY_MASK)) |
	      (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT));
	/* Enable coalesce, delay timer and error interrupts */
	cr |= XAXIDMA_IRQ_ALL_MASK;
	/* Finally write to the Tx channel control register */
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET, cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	nixge_dma_write_desc_reg(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_RX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
	nixge_dma_write_desc_reg(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
				 (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting
	 */
	nixge_dma_write_desc_reg(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	cr = nixge_dma_read_reg(lp, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(lp, XAXIDMA_TX_CR_OFFSET,
			    cr | XAXIDMA_CR_RUNSTOP_MASK);
}

static int nixge_open(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	struct phy_device *phy;
	int ret;

	nixge_device_reset(ndev);

	phy = of_phy_connect(ndev, priv->phy_node,
			     &nixge_handle_link_change, 0, priv->phy_mode);
	if (!phy)
		return -ENODEV;

	phy_start(phy);

	/* Enable tasklets for Axi DMA error handling */
	tasklet_setup(&priv->dma_err_tasklet, nixge_dma_err_handler);

	napi_enable(&priv->napi);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(priv->tx_irq, nixge_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(priv->rx_irq, nixge_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;

	netif_start_queue(ndev);

	return 0;

err_rx_irq:
	free_irq(priv->tx_irq, ndev);
err_tx_irq:
	napi_disable(&priv->napi);
	phy_stop(phy);
	phy_disconnect(phy);
	tasklet_kill(&priv->dma_err_tasklet);
	netdev_err(ndev, "request_irq() failed\n");
	return ret;
}

static int nixge_stop(struct net_device *ndev)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 cr;

	netif_stop_queue(ndev);
	napi_disable(&priv->napi);

	if (ndev->phydev) {
		phy_stop(ndev->phydev);
		phy_disconnect(ndev->phydev);
	}

	cr = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_RX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	nixge_dma_write_reg(priv, XAXIDMA_TX_CR_OFFSET,
			    cr & (~XAXIDMA_CR_RUNSTOP_MASK));

	tasklet_kill(&priv->dma_err_tasklet);

	free_irq(priv->tx_irq, ndev);
	free_irq(priv->rx_irq, ndev);

	nixge_hw_dma_bd_release(ndev);

	return 0;
}

static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
{
	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + NIXGE_HDR_SIZE + NIXGE_TRL_SIZE) >
	     NIXGE_MAX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	WRITE_ONCE(ndev->mtu, new_mtu);

	return 0;
}
__nixge_hw_set_mac_address(struct net_device
*ndev
)
956 struct nixge_priv
*priv
= netdev_priv(ndev
);
958 nixge_ctrl_write_reg(priv
, NIXGE_REG_MAC_LSB
,
959 (ndev
->dev_addr
[2]) << 24 |
960 (ndev
->dev_addr
[3] << 16) |
961 (ndev
->dev_addr
[4] << 8) |
962 (ndev
->dev_addr
[5] << 0));
964 nixge_ctrl_write_reg(priv
, NIXGE_REG_MAC_MSB
,
965 (ndev
->dev_addr
[1] | (ndev
->dev_addr
[0] << 8)));

static int nixge_net_set_mac_address(struct net_device *ndev, void *p)
{
	int err;

	err = eth_mac_addr(ndev, p);
	if (!err)
		__nixge_hw_set_mac_address(ndev);

	return err;
}

static const struct net_device_ops nixge_netdev_ops = {
	.ndo_open = nixge_open,
	.ndo_stop = nixge_stop,
	.ndo_start_xmit = nixge_start_xmit,
	.ndo_change_mtu	= nixge_change_mtu,
	.ndo_set_mac_address = nixge_net_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
};

static void nixge_ethtools_get_drvinfo(struct net_device *ndev,
				       struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, "nixge", sizeof(ed->driver));
	strscpy(ed->bus_info, "platform", sizeof(ed->bus_info));
}

static int
nixge_ethtools_get_coalesce(struct net_device *ndev,
			    struct ethtool_coalesce *ecoalesce,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 regval = 0;

	regval = nixge_dma_read_reg(priv, XAXIDMA_RX_CR_OFFSET);
	ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	regval = nixge_dma_read_reg(priv, XAXIDMA_TX_CR_OFFSET);
	ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK)
					     >> XAXIDMA_COALESCE_SHIFT;
	return 0;
}

static int
nixge_ethtools_set_coalesce(struct net_device *ndev,
			    struct ethtool_coalesce *ecoalesce,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct nixge_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EBUSY;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		priv->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->tx_max_coalesced_frames)
		priv->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;

	return 0;
}

static int nixge_ethtools_set_phys_id(struct net_device *ndev,
				      enum ethtool_phys_id_state state)
{
	struct nixge_priv *priv = netdev_priv(ndev);
	u32 ctrl;

	ctrl = nixge_ctrl_read_reg(priv, NIXGE_REG_LED_CTL);
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		ctrl |= NIXGE_ID_LED_CTL_EN;
		/* Enable identification LED override */
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		return 2;

	case ETHTOOL_ID_ON:
		ctrl |= NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_OFF:
		ctrl &= ~NIXGE_ID_LED_CTL_VAL;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		ctrl &= ~NIXGE_ID_LED_CTL_EN;
		nixge_ctrl_write_reg(priv, NIXGE_REG_LED_CTL, ctrl);
		break;
	}

	return 0;
}

static const struct ethtool_ops nixge_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES,
	.get_drvinfo    = nixge_ethtools_get_drvinfo,
	.get_coalesce   = nixge_ethtools_get_coalesce,
	.set_coalesce   = nixge_ethtools_set_coalesce,
	.set_phys_id    = nixge_ethtools_set_phys_id,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
	.get_link		= ethtool_op_get_link,
};
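
/* All MDIO accessors below follow the same register handshake: the op word
 * goes to NIXGE_REG_MDIO_OP (and write data to NIXGE_REG_MDIO_DATA), the
 * transaction is started by writing 1 to NIXGE_REG_MDIO_CTRL, completion is
 * detected by polling CTRL until it reads back as zero, and read results
 * are then fetched from NIXGE_REG_MDIO_DATA.
 */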
static int nixge_mdio_read_c22(struct mii_bus *bus, int phy_id, int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;
	u16 device;

	device = reg & 0x1f;

	tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_READ) |
	      NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_read_c45(struct mii_bus *bus, int phy_id, int device,
			       int reg)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

	tmp = NIXGE_MDIO_CLAUSE45 |
	      NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
	      NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting address");
		return err;
	}

	tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_READ) |
	      NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting read command");
		return err;
	}

	status = nixge_ctrl_read_reg(priv, NIXGE_REG_MDIO_DATA);

	return status;
}

static int nixge_mdio_write_c22(struct mii_bus *bus, int phy_id, int reg,
				u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	u16 device;
	int err;

	device = reg & 0x1f;

	tmp = NIXGE_MDIO_CLAUSE22 | NIXGE_MDIO_OP(NIXGE_MDIO_C22_WRITE) |
	      NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err)
		dev_err(priv->dev, "timeout setting write command");

	return err;
}

static int nixge_mdio_write_c45(struct mii_bus *bus, int phy_id,
				int device, int reg, u16 val)
{
	struct nixge_priv *priv = bus->priv;
	u32 status, tmp;
	int err;

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_ADDR, reg & 0xffff);

	tmp = NIXGE_MDIO_CLAUSE45 |
	      NIXGE_MDIO_OP(NIXGE_MDIO_OP_ADDRESS) |
	      NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err) {
		dev_err(priv->dev, "timeout setting address");
		return err;
	}

	tmp = NIXGE_MDIO_CLAUSE45 | NIXGE_MDIO_OP(NIXGE_MDIO_C45_WRITE) |
	      NIXGE_MDIO_ADDR(phy_id) | NIXGE_MDIO_MMD(device);

	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_DATA, val);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_OP, tmp);
	nixge_ctrl_write_reg(priv, NIXGE_REG_MDIO_CTRL, 1);

	err = nixge_ctrl_poll_timeout(priv, NIXGE_REG_MDIO_CTRL, status,
				      !status, 10, 1000);
	if (err)
		dev_err(priv->dev, "timeout setting write command");

	return err;
}

static int nixge_mdio_setup(struct nixge_priv *priv, struct device_node *np)
{
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(priv->dev);
	if (!bus)
		return -ENOMEM;

	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(priv->dev));
	bus->priv = priv;
	bus->name = "nixge_mii_bus";
	bus->read = nixge_mdio_read_c22;
	bus->write = nixge_mdio_write_c22;
	bus->read_c45 = nixge_mdio_read_c45;
	bus->write_c45 = nixge_mdio_write_c45;
	bus->parent = priv->dev;

	priv->mii_bus = bus;

	return of_mdiobus_register(bus, np);
}

static void *nixge_get_nvmem_address(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t cell_size;
	char *mac;

	cell = nvmem_cell_get(dev, "address");
	if (IS_ERR(cell))
		return cell;

	mac = nvmem_cell_read(cell, &cell_size);
	nvmem_cell_put(cell);

	return mac;
}

/* Match table for of_platform binding */
static const struct of_device_id nixge_dt_ids[] = {
	{ .compatible = "ni,xge-enet-2.00", .data = (void *)NIXGE_V2 },
	{ .compatible = "ni,xge-enet-3.00", .data = (void *)NIXGE_V3 },
	{},
};
MODULE_DEVICE_TABLE(of, nixge_dt_ids);

static int nixge_of_get_resources(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	enum nixge_version version;
	struct net_device *ndev;
	struct nixge_priv *priv;

	ndev = platform_get_drvdata(pdev);
	priv = netdev_priv(ndev);
	of_id = of_match_node(nixge_dt_ids, pdev->dev.of_node);
	if (!of_id)
		return -ENODEV;

	version = (enum nixge_version)of_id->data;
	if (version <= NIXGE_V2)
		priv->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	else
		priv->dma_regs = devm_platform_ioremap_resource_byname(pdev, "dma");
	if (IS_ERR(priv->dma_regs)) {
		netdev_err(ndev, "failed to map dma regs\n");
		return PTR_ERR(priv->dma_regs);
	}
	if (version <= NIXGE_V2)
		priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET;
	else
		priv->ctrl_regs = devm_platform_ioremap_resource_byname(pdev, "ctrl");
	if (IS_ERR(priv->ctrl_regs)) {
		netdev_err(ndev, "failed to map ctrl regs\n");
		return PTR_ERR(priv->ctrl_regs);
	}

	return 0;
}

static int nixge_probe(struct platform_device *pdev)
{
	struct device_node *mn, *phy_node;
	struct nixge_priv *priv;
	struct net_device *ndev;
	const u8 *mac_addr;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);

	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &nixge_netdev_ops;
	ndev->ethtool_ops = &nixge_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = NIXGE_JUMBO_MTU;

	mac_addr = nixge_get_nvmem_address(&pdev->dev);
	if (!IS_ERR(mac_addr) && is_valid_ether_addr(mac_addr)) {
		eth_hw_addr_set(ndev, mac_addr);
		kfree(mac_addr);
	} else {
		eth_hw_addr_random(ndev);
	}

	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->dev = &pdev->dev;

	netif_napi_add(ndev, &priv->napi, nixge_poll);
	err = nixge_of_get_resources(pdev);
	if (err)
		goto free_netdev;
	__nixge_hw_set_mac_address(ndev);

	priv->tx_irq = platform_get_irq_byname(pdev, "tx");
	if (priv->tx_irq < 0) {
		netdev_err(ndev, "could not find 'tx' irq");
		err = priv->tx_irq;
		goto free_netdev;
	}

	priv->rx_irq = platform_get_irq_byname(pdev, "rx");
	if (priv->rx_irq < 0) {
		netdev_err(ndev, "could not find 'rx' irq");
		err = priv->rx_irq;
		goto free_netdev;
	}

	priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	priv->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;

	mn = of_get_child_by_name(pdev->dev.of_node, "mdio");
	if (mn) {
		err = nixge_mdio_setup(priv, mn);
		of_node_put(mn);
		if (err) {
			netdev_err(ndev, "error registering mdio bus");
			goto free_netdev;
		}
	}

	err = of_get_phy_mode(pdev->dev.of_node, &priv->phy_mode);
	if (err) {
		netdev_err(ndev, "could not find \"phy-mode\" property\n");
		goto unregister_mdio;
	}

	phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(pdev->dev.of_node)) {
		err = of_phy_register_fixed_link(pdev->dev.of_node);
		if (err < 0) {
			netdev_err(ndev, "broken fixed-link specification\n");
			goto unregister_mdio;
		}
		phy_node = of_node_get(pdev->dev.of_node);
	}
	priv->phy_node = phy_node;

	err = register_netdev(priv->ndev);
	if (err) {
		netdev_err(ndev, "register_netdev() error (%i)\n", err);
		goto free_phy;
	}

	return 0;

free_phy:
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(phy_node);

unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

free_netdev:
	free_netdev(ndev);

	return err;
}

static void nixge_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct nixge_priv *priv = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
	of_node_put(priv->phy_node);

	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

	free_netdev(ndev);
}

static struct platform_driver nixge_driver = {
	.probe		= nixge_probe,
	.remove		= nixge_remove,
	.driver		= {
		.name		= "nixge",
		.of_match_table	= nixge_dt_ids,
	},
};
module_platform_driver(nixge_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("National Instruments XGE Management MAC");
MODULE_AUTHOR("Moritz Fischer <mdf@kernel.org>");