// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2020 Broadcom
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
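/* Worked example (illustrative values, not taken from any particular chip):
 * with TOTAL_DESC = 256, tx_queues = 4 and tx_bds_per_q = 32, the default
 * queue 16 is left with 256 - 4 * 32 = 128 TX descriptors; the RX split
 * follows the same arithmetic with the rx_* parameters.
 */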
#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

/* Forward declarations */
static void bcmgenet_set_rx_mode(struct net_device *dev);
static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}
static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}
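/* Illustrative example (hypothetical address): a 40-bit DMA address such as
 * 0x1_2345_6789 is programmed as DMA_DESC_ADDRESS_LO = 0x23456789 and
 * DMA_DESC_ADDRESS_HI = 0x00000001; on platforms without GENET_HAS_40BITS
 * only the LO word is written.
 */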
/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv, void __iomem *d,
			       dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register writes to GISB bus can take couple hundred nanoseconds
	 * and are done for each packet, save these expensive writes unless
	 * the platform is explicitly configured for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}
/* These macros are defined to deal with register map change
 * between GENET1.1 and GENET2. Only those currently being used
 * by driver are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}
/* RX/TX DMA register accessors */
static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};
static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};
static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}
/* RDMA/TDMA ring registers and accessors
 * we merge the common fields and just prefix with T/D the registers
 * having different meaning depending on the direction
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI,
};

/* GENET v4 supports 40-bits pointer addressing
 * for obvious reasons the LO and HI word parts
 * are contiguous, but this offsets the other
 * registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;
static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}
static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg |= (1 << (f_index % 32));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
	reg |= RBUF_HFB_EN;
	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
}

static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 offset, reg, reg1;

	offset = HFB_FLT_ENABLE_V3PLUS;
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
	if (f_index < 32) {
		reg1 &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
	} else {
		reg &= ~(1 << (f_index % 32));
		bcmgenet_hfb_reg_writel(priv, reg, offset);
	}
	if (!reg && !reg1) {
		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
		reg &= ~RBUF_HFB_EN;
		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
	}
}
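/* Worked example of the enable-bit mapping above: filter index 5 (< 32)
 * selects the word at HFB_FLT_ENABLE_V3PLUS + 4 and sets bit (5 % 32) = 5,
 * while filter index 40 selects the word at HFB_FLT_ENABLE_V3PLUS and sets
 * bit (40 % 32) = 8.
 */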
static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
						     u32 f_index, u32 rx_queue)
{
	u32 offset;
	u32 reg;

	offset = f_index / 8;
	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
	reg &= ~(0xF << (4 * (f_index % 8)));
	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
}

static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
					   u32 f_index, u32 f_length)
{
	u32 offset;
	u32 reg;

	offset = HFB_FLT_LEN_V3PLUS +
		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
		 sizeof(u32);
	reg = bcmgenet_hfb_reg_readl(priv, offset);
	reg &= ~(0xFF << (8 * (f_index % 4)));
	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
	bcmgenet_hfb_reg_writel(priv, reg, offset);
}
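/* Worked example of the nibble packing above: filter index 10 lands in
 * DMA_INDEX2RING_1 (10 / 8 = 1) and occupies nibble 2 (10 % 8), i.e. bits
 * [11:8]; writing rx_queue = 3 there steers matches for that filter to RX
 * ring 3. The per-filter length bytes are packed four per 32-bit word in
 * the same read-modify-write fashion.
 */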
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
	while (size) {
		switch (*(unsigned char *)mask++) {
		case 0x00:
		case 0x0f:
		case 0xf0:
		case 0xff:
			size--;
			continue;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

#define VALIDATE_MASK(x) \
	bcmgenet_hfb_validate_mask(&(x), sizeof(x))
static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
				    u32 offset, void *val, void *mask,
				    size_t size)
{
	int index;
	u32 tmp;

	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));

	while (size--) {
		if (offset++ & 1) {
			/* Odd byte: data in bits [7:0] of the filter word */
			tmp &= ~0x300FF;
			tmp |= (*(unsigned char *)val++);
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0x30000;
				break;
			case 0xF0:
				tmp |= 0x20000;
				break;
			case 0x0F:
				tmp |= 0x10000;
				break;
			}
			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
			if (size)
				tmp = bcmgenet_hfb_readl(priv,
							 index * sizeof(u32));
		} else {
			/* Even byte: data in bits [15:8] of the filter word */
			tmp &= ~0xCFF00;
			tmp |= (*(unsigned char *)val++) << 8;
			switch ((*(unsigned char *)mask++)) {
			case 0xFF:
				tmp |= 0xC0000;
				break;
			case 0xF0:
				tmp |= 0x80000;
				break;
			case 0x0F:
				tmp |= 0x40000;
				break;
			}
			if (!size)
				bcmgenet_hfb_writel(priv, tmp,
						    index * sizeof(u32));
		}
	}

	return 0;
}
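/* Packing note (follows from the offset / 2 indexing above): each 32-bit
 * HFB word carries two bytes of match data, the even byte in bits [15:8]
 * and the odd byte in bits [7:0], with the per-nibble enable bits kept
 * above bit 15. Example: bytes at filter offsets 12 and 13 share word 6 of
 * the filter's block of hfb_filter_size words.
 */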
static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
					     struct bcmgenet_rxnfc_rule *rule)
{
	struct ethtool_rx_flow_spec *fs = &rule->fs;
	u32 offset = 0, f_length = 0, f;
	u8 val_8, mask_8;
	__be16 val_16;
	u16 mask_16;
	size_t size;

	f = fs->location;
	if (fs->flow_type & FLOW_MAC_EXT) {
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
					 sizeof(fs->h_ext.h_dest));
	}

	if (fs->flow_type & FLOW_EXT) {
		if (fs->m_ext.vlan_etype ||
		    fs->m_ext.vlan_tci) {
			bcmgenet_hfb_insert_data(priv, f, 12,
						 &fs->h_ext.vlan_etype,
						 &fs->m_ext.vlan_etype,
						 sizeof(fs->h_ext.vlan_etype));
			bcmgenet_hfb_insert_data(priv, f, 14,
						 &fs->h_ext.vlan_tci,
						 &fs->m_ext.vlan_tci,
						 sizeof(fs->h_ext.vlan_tci));
			offset += VLAN_HLEN;
			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
		}
	}

	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case ETHER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
		bcmgenet_hfb_insert_data(priv, f, 0,
					 &fs->h_u.ether_spec.h_dest,
					 &fs->m_u.ether_spec.h_dest,
					 sizeof(fs->h_u.ether_spec.h_dest));
		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
					 &fs->h_u.ether_spec.h_source,
					 &fs->m_u.ether_spec.h_source,
					 sizeof(fs->h_u.ether_spec.h_source));
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &fs->h_u.ether_spec.h_proto,
					 &fs->m_u.ether_spec.h_proto,
					 sizeof(fs->h_u.ether_spec.h_proto));
		break;
	case IP_USER_FLOW:
		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
		/* Specify IP Ether Type */
		val_16 = htons(ETH_P_IP);
		mask_16 = 0xFFFF;
		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
					 &val_16, &mask_16, sizeof(val_16));
		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
					 &fs->h_u.usr_ip4_spec.tos,
					 &fs->m_u.usr_ip4_spec.tos,
					 sizeof(fs->h_u.usr_ip4_spec.tos));
		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
					 &fs->h_u.usr_ip4_spec.proto,
					 &fs->m_u.usr_ip4_spec.proto,
					 sizeof(fs->h_u.usr_ip4_spec.proto));
		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
					 &fs->h_u.usr_ip4_spec.ip4src,
					 &fs->m_u.usr_ip4_spec.ip4src,
					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
					 &fs->h_u.usr_ip4_spec.ip4dst,
					 &fs->m_u.usr_ip4_spec.ip4dst,
					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
			break;

		/* Only supports 20 byte IPv4 header */
		val_8 = 0x45;
		mask_8 = 0xFF;
		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
					 &val_8, &mask_8, sizeof(val_8));
		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
		bcmgenet_hfb_insert_data(priv, f,
					 ETH_HLEN + 20 + offset,
					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
					 size);
		f_length += DIV_ROUND_UP(size, 2);
		break;
	}

	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
		/* Ring 0 flows can be handled by the default Descriptor Ring
		 * We'll map them to ring 0, but don't enable the filter
		 */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
	} else {
		/* Other Rx rings are direct mapped here */
		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
							 fs->ring_cookie - 1);
		bcmgenet_hfb_enable_filter(priv, f);
		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
	}
}
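/* Worked example of the filter-length arithmetic above: an ETHER_FLOW rule
 * carrying a VLAN tag matches ETH_HLEN + VLAN_HLEN = 14 + 4 = 18 bytes,
 * i.e. f_length = 2 + 7 = 9 two-byte tokens, so the filter length register
 * is programmed with 2 * 9 = 18.
 */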
/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
	u32 base, i;

	base = f_index * priv->hw_params->hfb_filter_size;
	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
}

static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
		bcmgenet_hfb_clear_filter(priv, i);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	int i;

	INIT_LIST_HEAD(&priv->rxnfc_list);
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
	}

	bcmgenet_hfb_clear(priv);
}
static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}
static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}
static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}
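/* Conversion example for the timeout register used above and below: one
 * tick is roughly 8.192 us (125 MHz reference divided by 1024), so a
 * register value of 50 reads back as 50 * 8192 / 1000 = 409 us, and a
 * request of 100 us is programmed as DIV_ROUND_UP(100 * 1000, 8192) = 13
 * ticks.
 */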
static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}

static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}
static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192us, our maximum value
	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
	 */
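	/* Worked example: a 16-bit DMA_TIMEOUT_MASK caps the timeout at
	 * 0xFFFF * 8.192 us, roughly 536 ms, which is why rx_coalesce_usecs
	 * is bounded by (DMA_TIMEOUT_MASK * 8) + 1 below.
	 */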
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but will
	 * always generate an interrupt either after MBDONE packets have been
	 * transmitted, or when the ring is empty.
	 */

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc
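/* Example of how the gap is applied when walking the MIB below: an RX
 * counter is read at UMAC_MIB_START + j, a TX counter at
 * UMAC_MIB_START + j + 0xc, and a RUNT counter at UMAC_MIB_START + j + 0x18,
 * where j is the accumulated size of the preceding entries.
 */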
/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
			    mib.tx_realloc_tsb_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			fallthrough;
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}
static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	dev->netdev_ops->ndo_get_stats(dev);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27Mhz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same for thing for RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}
static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(dev->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}
static int bcmgenet_validate_flow(struct net_device *dev,
				  struct ethtool_rxnfc *cmd)
{
	struct ethtool_usrip4_spec *l4_mask;
	struct ethhdr *eth_mask;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) {
		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
			   cmd->fs.location);
		return -EINVAL;
	}

	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
	case IP_USER_FLOW:
		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(l4_mask->ip4src) ||
		    VALIDATE_MASK(l4_mask->ip4dst) ||
		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
		    VALIDATE_MASK(l4_mask->proto) ||
		    VALIDATE_MASK(l4_mask->ip_ver) ||
		    VALIDATE_MASK(l4_mask->tos)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	case ETHER_FLOW:
		eth_mask = &cmd->fs.m_u.ether_spec;
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(eth_mask->h_dest) ||
		    VALIDATE_MASK(eth_mask->h_source) ||
		    VALIDATE_MASK(eth_mask->h_proto)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		break;
	default:
		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
			   cmd->fs.flow_type);
		return -EINVAL;
	}

	if ((cmd->fs.flow_type & FLOW_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
			netdev_err(dev, "rxnfc: user-def not supported\n");
			return -EINVAL;
		}
	}

	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
		/* don't allow mask which isn't valid */
		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
			netdev_err(dev, "rxnfc: Unsupported mask\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int bcmgenet_insert_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *loc_rule;
	int err;

	if (priv->hw_params->hfb_filter_size < 128) {
		netdev_err(dev, "rxnfc: Not supported by this device\n");
		return -EINVAL;
	}

	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
			   cmd->fs.ring_cookie);
		return -EINVAL;
	}

	err = bcmgenet_validate_flow(dev, cmd);
	if (err)
		return err;

	loc_rule = &priv->rxnfc_rules[cmd->fs.location];
	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&loc_rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	}
	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memcpy(&loc_rule->fs, &cmd->fs,
	       sizeof(struct ethtool_rx_flow_spec));

	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);

	list_add_tail(&loc_rule->list, &priv->rxnfc_list);

	return 0;
}
static int bcmgenet_delete_flow(struct net_device *dev,
				struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;

	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->rxnfc_rules[cmd->fs.location];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
		return -ENOENT;
	}

	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
		list_del(&rule->list);
		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
	}
	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));

	return 0;
}
static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = bcmgenet_insert_flow(dev, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = bcmgenet_delete_flow(dev, cmd);
		break;
	default:
		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
			    cmd->cmd);
		return -EINVAL;
	}

	return err;
}
static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
			     int loc)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;

	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
		return -EINVAL;

	rule = &priv->rxnfc_rules[loc];
	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
		err = -ENOENT;
	else
		memcpy(&cmd->fs, &rule->fs,
		       sizeof(struct ethtool_rx_flow_spec));

	return err;
}

static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
{
	struct list_head *pos;
	int res = 0;

	list_for_each(pos, &priv->rxnfc_list)
		res++;

	return res;
}
static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			      u32 *rule_locs)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rxnfc_rule *rule;
	int err = 0;
	int i = 0;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = priv->hw_params->rx_queues ?: 1;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
		cmd->data = MAX_NUM_OF_FS_RULES;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		list_for_each_entry(rule, &priv->rxnfc_list, list)
			if (i < cmd->rule_cnt)
				rule_locs[i++] = rule->fs.location;
		cmd->rule_cnt = i;
		cmd->data = MAX_NUM_OF_FS_RULES;
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_rxnfc		= bcmgenet_get_rxnfc,
	.set_rxnfc		= bcmgenet_set_rxnfc,
};
/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->dev->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv))
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
		if (GENET_IS_V5(priv)) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		break;

	case GENET_POWER_CABLE_SENSE:
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;

	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		break;
	default:
		break;
	}
}
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}
/* Simple helper to free a transmit control block's resources
 * Returns an skb when the last transmit control block associated with the
 * skb is freed. The skb should be freed by the caller if necessary.
 */
static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;

	if (skb) {
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
					 dma_unmap_len(cb, dma_len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
				       dma_unmap_len(cb, dma_len),
				       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);

		if (cb == GENET_CB(skb)->last_cb)
			return skb;

	} else if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return NULL;
}

/* Simple helper to free a receive control block's resources */
static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;
	cb->skb = NULL;

	if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return skb;
}
/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int txbds_processed = 0;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int txbds_ready;
	unsigned int c_index;
	struct sk_buff *skb;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	else
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
					 INTRL2_CPU_CLEAR);

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
		if (skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(skb)->bytes_sent;
			dev_consume_skb_any(skb);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = c_index;

	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return txbds_processed;
}

static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;

	spin_lock_bh(&ring->lock);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_bh(&ring->lock);

	return released;
}
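/* Wraparound example for the reclaim arithmetic above (assuming the 16-bit
 * index mask): a hardware consumer index of 0x0005 and a stored
 * ring->c_index of 0xFFFB yield (0x0005 - 0xFFFB) & 0xFFFF = 10 newly
 * completed descriptors.
 */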
static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_tx_ring *ring =
		container_of(napi, struct bcmgenet_tx_ring, napi);
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	spin_lock(&ring->lock);
	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
		netif_tx_wake_queue(txq);
	}
	spin_unlock(&ring->lock);

	if (work_done == 0) {
		napi_complete(napi);
		ring->int_enable(ring);

		return 0;
	}

	return budget;
}

static void bcmgenet_tx_reclaim_all(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_is_multiqueue(dev)) {
		for (i = 0; i < priv->hw_params->tx_queues; i++)
			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
	}

	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
}
/* Reallocate the SKB to put enough headroom in front of it and insert
 * the transmit checksum offsets in the descriptors
 */
static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
					struct sk_buff *skb)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct status_64 *status = NULL;
	struct sk_buff *new_skb;
	u16 offset;
	u8 ip_proto;
	__be16 ip_ver;
	u32 tx_csum_info;

	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
		/* If 64 byte status block enabled, must make sure skb has
		 * enough headroom for us to insert 64B status block.
		 */
		new_skb = skb_realloc_headroom(skb, sizeof(*status));
		if (!new_skb) {
			dev_kfree_skb_any(skb);
			priv->mib.tx_realloc_tsb_failed++;
			dev->stats.tx_dropped++;
			return NULL;
		}
		dev_consume_skb_any(skb);
		skb = new_skb;
		priv->mib.tx_realloc_tsb++;
	}

	skb_push(skb, sizeof(*status));
	status = (struct status_64 *)skb->data;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		ip_ver = skb->protocol;
		switch (ip_ver) {
		case htons(ETH_P_IP):
			ip_proto = ip_hdr(skb)->protocol;
			break;
		case htons(ETH_P_IPV6):
			ip_proto = ipv6_hdr(skb)->nexthdr;
			break;
		default:
			/* don't use UDP flag */
			ip_proto = 0;
			break;
		}

		offset = skb_checksum_start_offset(skb) - sizeof(*status);
		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
			       (offset + skb->csum_offset) |
			       STATUS_TX_CSUM_LV;

		/* Set the special UDP flag for UDP */
		if (ip_proto == IPPROTO_UDP)
			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;

		status->tx_csum_info = tx_csum_info;
	}

	return skb;
}
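/* Worked example of the TSB checksum fields above: for a plain IPv4/TCP
 * frame (14-byte Ethernet + 20-byte IP header) the checksum start lands at
 * frame offset 34 and skb->csum_offset is 16, so the TSB tells the hardware
 * to start summing at offset 34 and to write the result back at offset
 * 34 + 16 = 50.
 */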
static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	struct bcmgenet_tx_ring *ring = NULL;
	struct enet_cb *tx_cb_ptr;
	struct netdev_queue *txq;
	int nr_frags, index;
	dma_addr_t mapping;
	unsigned int size;
	skb_frag_t *frag;
	u32 len_stat;
	int ret;
	int i;

	index = skb_get_queue_mapping(skb);
	/* Mapping strategy:
	 * queue_mapping = 0, unclassified, packet xmited through ring16
	 * queue_mapping = 1, goes to ring 0. (highest priority queue
	 * queue_mapping = 2, goes to ring 1.
	 * queue_mapping = 3, goes to ring 2.
	 * queue_mapping = 4, goes to ring 3.
	 */
	if (index == 0)
		index = DESC_INDEX;
	else
		index -= 1;

	ring = &priv->tx_rings[index];
	txq = netdev_get_tx_queue(dev, ring->queue);

	nr_frags = skb_shinfo(skb)->nr_frags;

	spin_lock(&ring->lock);
	if (ring->free_bds <= (nr_frags + 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);
			netdev_err(dev,
				   "%s: tx ring %d full when queue %d awake\n",
				   __func__, index, ring->queue);
		}
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	/* Retain how many bytes will be sent on the wire, without TSB inserted
	 * by transmit checksum offload
	 */
	GENET_CB(skb)->bytes_sent = skb->len;

	/* add the Transmit Status Block */
	skb = bcmgenet_add_tsb(dev, skb);
	if (!skb) {
		ret = NETDEV_TX_OK;
		goto out;
	}

	for (i = 0; i <= nr_frags; i++) {
		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);

		if (!i) {
			/* Transmit single SKB or head of fragment list */
			GENET_CB(skb)->first_cb = tx_cb_ptr;
			size = skb_headlen(skb);
			mapping = dma_map_single(kdev, skb->data, size,
						 DMA_TO_DEVICE);
		} else {
			/* xmit fragment */
			frag = &skb_shinfo(skb)->frags[i - 1];
			size = skb_frag_size(frag);
			mapping = skb_frag_dma_map(kdev, frag, 0, size,
						   DMA_TO_DEVICE);
		}

		ret = dma_mapping_error(kdev, mapping);
		if (ret) {
			priv->mib.tx_dma_failed++;
			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
			ret = NETDEV_TX_OK;
			goto out_unmap_frags;
		}
		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
		dma_unmap_len_set(tx_cb_ptr, dma_len, size);

		tx_cb_ptr->skb = skb;

		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);

		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
		 * will need to restore software padding of "runt" packets
		 */
		if (!i) {
			len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				len_stat |= DMA_TX_DO_CSUM;
		}
		if (i == nr_frags)
			len_stat |= DMA_EOP;

		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
	}

	GENET_CB(skb)->last_cb = tx_cb_ptr;
	skb_tx_timestamp(skb);

	/* Decrement total BD count and advance our write pointer */
	ring->free_bds -= nr_frags + 1;
	ring->prod_index += nr_frags + 1;
	ring->prod_index &= DMA_P_INDEX_MASK;

	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);

	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
		netif_tx_stop_queue(txq);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
		/* Packets are ready, update producer index */
		bcmgenet_tdma_ring_writel(priv, ring->index,
					  ring->prod_index, TDMA_PROD_INDEX);
out:
	spin_unlock(&ring->lock);

	return ret;

out_unmap_frags:
	/* Back up for failed control block mapping */
	bcmgenet_put_txcb(priv, ring);

	/* Unmap successfully mapped control blocks */
	while (i-- > 0) {
		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
	}

	dev_kfree_skb(skb);
	goto out;
}
static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
					  struct enet_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct sk_buff *skb;
	struct sk_buff *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new Rx skb */
	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb allocation failed\n", __func__);
		return NULL;
	}

	/* DMA-map the new Rx skb */
	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, priv->dev,
			  "%s: Rx skb DMA mapping failed\n", __func__);
		return NULL;
	}

	/* Grab the current Rx skb from the ring and DMA-unmap it */
	rx_skb = bcmgenet_free_rx_cb(kdev, cb);

	/* Put the new Rx skb on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
	dmadesc_set_addr(priv, cb->bd_addr, mapping);

	/* Return the current Rx skb to caller */
	return rx_skb;
}
/* bcmgenet_desc_rx - descriptor based rx process.
 * this could be called from bottom half, or from NAPI polling method.
 */
static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
				     unsigned int budget)
{
	struct bcmgenet_priv *priv = ring->priv;
	struct net_device *dev = priv->dev;
	struct enet_cb *cb;
	struct sk_buff *skb;
	u32 dma_length_status;
	unsigned long dma_flag;
	int len;
	unsigned int rxpktprocessed = 0, rxpkttoprocess;
	unsigned int bytes_processed = 0;
	unsigned int p_index, mask;
	unsigned int discards;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX) {
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	} else {
		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
		bcmgenet_intrl2_1_writel(priv,
					 mask,
					 INTRL2_CPU_CLEAR);
	}

	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);

	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
		   DMA_P_INDEX_DISCARD_CNT_MASK;
	if (discards > ring->old_discards) {
		discards = discards - ring->old_discards;
		ring->errors += discards;
		ring->old_discards += discards;

		/* Clear HW register when we reach 75% of maximum 0xFFFF */
		if (ring->old_discards >= 0xC000) {
			ring->old_discards = 0;
			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
						  RDMA_PROD_INDEX);
		}
	}

	p_index &= DMA_P_INDEX_MASK;
	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, rx_status, dev,
		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);

	while ((rxpktprocessed < rxpkttoprocess) &&
	       (rxpktprocessed < budget)) {
		struct status_64 *status;
		__be16 rx_csum;

		cb = &priv->rx_cbs[ring->read_ptr];
		skb = bcmgenet_rx_refill(priv, cb);

		if (unlikely(!skb)) {
			ring->dropped++;
			goto next;
		}

		status = (struct status_64 *)skb->data;
		dma_length_status = status->length_status;
		if (dev->features & NETIF_F_RXCSUM) {
			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
			skb->csum = (__force __wsum)ntohs(rx_csum);
			skb->ip_summed = CHECKSUM_COMPLETE;
		}

		/* DMA flags and length are still valid no matter how
		 * we got the Receive Status Vector (64B RSB or register)
		 */
		dma_flag = dma_length_status & 0xffff;
		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;

		netif_dbg(priv, rx_status, dev,
			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
			  __func__, p_index, ring->c_index,
			  ring->read_ptr, dma_length_status);

		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
			netif_err(priv, rx_status, dev,
				  "dropping fragmented packet!\n");
			ring->errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		/* report errors */
		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
					 DMA_RX_OV |
					 DMA_RX_NO |
					 DMA_RX_LG |
					 DMA_RX_RXER))) {
			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
				  (unsigned int)dma_flag);
			if (dma_flag & DMA_RX_CRC_ERROR)
				dev->stats.rx_crc_errors++;
			if (dma_flag & DMA_RX_OV)
				dev->stats.rx_over_errors++;
			if (dma_flag & DMA_RX_NO)
				dev->stats.rx_frame_errors++;
			if (dma_flag & DMA_RX_LG)
				dev->stats.rx_length_errors++;
			dev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		} /* error packet */

		skb_put(skb, len);

		/* remove RSB and hardware 2bytes added for IP alignment */
		skb_pull(skb, 66);
		len -= 66;

		if (priv->crc_fwd_en) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		bytes_processed += len;

		/*Finish setting up the received SKB and send it to the kernel*/
		skb->protocol = eth_type_trans(skb, priv->dev);
		ring->packets++;
		ring->bytes += len;
		if (dma_flag & DMA_RX_MULT)
			dev->stats.multicast++;

		/* Notify kernel */
		napi_gro_receive(&ring->napi, skb);
		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");

next:
		rxpktprocessed++;
		if (likely(ring->read_ptr < ring->end_ptr))
			ring->read_ptr++;
		else
			ring->read_ptr = ring->cb_ptr;

		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
	}

	ring->dim.bytes = bytes_processed;
	ring->dim.packets = rxpktprocessed;

	return rxpktprocessed;
}
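
/* Illustrative note (not from the original source): the rxpkttoprocess
 * computation above relies on masked modular arithmetic, so it stays correct
 * when the hardware producer index wraps. For example, assuming a 16-bit
 * DMA_C_INDEX_MASK of 0xffff, p_index = 0x0005 and ring->c_index = 0xfffe
 * give (0x0005 - 0xfffe) & 0xffff = 7 packets pending, even though the
 * producer counter has already wrapped past zero.
 */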
/* Rx NAPI polling method */
static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
	struct bcmgenet_rx_ring *ring = container_of(napi,
			struct bcmgenet_rx_ring, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done;

	work_done = bcmgenet_desc_rx(ring, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ring->int_enable(ring);
	}

	if (ring->dim.use_dim) {
		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
				  ring->dim.bytes, &dim_sample);
		net_dim(&ring->dim.dim, dim_sample);
	}

	return work_done;
}
static void bcmgenet_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcmgenet_net_dim *ndim =
			container_of(dim, struct bcmgenet_net_dim, dim);
	struct bcmgenet_rx_ring *ring =
			container_of(ndim, struct bcmgenet_rx_ring, dim);
	struct dim_cq_moder cur_profile =
			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);

	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}
/* Assign skb to RX DMA descriptor. */
static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
				     struct bcmgenet_rx_ring *ring)
{
	struct enet_cb *cb;
	struct sk_buff *skb;
	int i;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* loop here for each buffer needing assign */
	for (i = 0; i < ring->size; i++) {
		cb = ring->cbs + i;
		skb = bcmgenet_rx_refill(priv, cb);
		if (skb)
			dev_consume_skb_any(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}
static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
{
	struct sk_buff *skb;
	struct enet_cb *cb;
	int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];

		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
		if (skb)
			dev_consume_skb_any(skb);
	}
}
static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
{
	u32 reg;

	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if (reg & CMD_SW_RESET)
		return;
	if (enable)
		reg |= mask;
	else
		reg &= ~mask;
	bcmgenet_umac_writel(priv, reg, UMAC_CMD);

	/* UniMAC stops on a packet boundary, wait for a full-size packet
	 * to be processed
	 */
	if (enable == 0)
		usleep_range(1000, 2000);
}
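
/* Illustrative note (not from the original source): the 1-2 ms sleep above is
 * sized so that even a full-size frame can drain at the slowest link speed.
 * Assuming a 1518-byte frame on a 10 Mbit/s link, transmission takes roughly
 * 1518 * 8 / 10,000,000 s ~= 1.2 ms, which is why a millisecond-scale
 * usleep_range() is used here rather than a short busy-wait.
 */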
static void reset_umac(struct bcmgenet_priv *priv)
{
	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
	bcmgenet_rbuf_ctrl_set(priv, 0);
	udelay(10);

	/* issue soft reset and disable MAC while updating its registers */
	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
	udelay(2);
}
static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
{
	/* Mask all interrupts.*/
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
}
static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
{
	u32 int0_enable = 0;

	/* Monitor cable plug/unplugged event for internal PHY, external PHY
	 * and MoCA PHY
	 */
	if (priv->internal_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
			int0_enable |= UMAC_IRQ_PHY_DET_R;
	} else if (priv->ext_phy) {
		int0_enable |= UMAC_IRQ_LINK_EVENT;
	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
			int0_enable |= UMAC_IRQ_LINK_EVENT;
	}
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
}
static void init_umac(struct bcmgenet_priv *priv)
{
	struct device *kdev = &priv->pdev->dev;
	u32 reg;
	u32 int0_enable = 0;

	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");

	reset_umac(priv);

	/* clear tx/rx counter */
	bcmgenet_umac_writel(priv,
			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
			     UMAC_MIB_CTRL);
	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);

	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);

	/* init tx registers, enable TSB */
	reg = bcmgenet_tbuf_ctrl_get(priv);
	reg |= TBUF_64B_EN;
	bcmgenet_tbuf_ctrl_set(priv, reg);

	/* init rx registers, enable ip header optimization and RSB */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
	reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);

	/* enable rx checksumming */
	reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
	reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->crc_fwd_en)
		reg |= RBUF_SKIP_FCS;
	else
		reg &= ~RBUF_SKIP_FCS;
	bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);

	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);

	bcmgenet_intr_disable(priv);

	/* Configure backpressure vectors for MoCA */
	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
		reg = bcmgenet_bp_mc_get(priv);
		reg |= BIT(priv->hw_params->bp_in_en_shift);

		/* bp_mask: back pressure mask */
		if (netif_is_multiqueue(priv->dev))
			reg |= priv->hw_params->bp_in_mask;
		else
			reg &= ~priv->hw_params->bp_in_mask;
		bcmgenet_bp_mc_set(priv, reg);
	}

	/* Enable MDIO interrupts on GENET v3+ */
	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);

	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);

	dev_dbg(kdev, "done init umac\n");
}
static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
			      void (*cb)(struct work_struct *work))
{
	struct bcmgenet_net_dim *dim = &ring->dim;

	INIT_WORK(&dim->dim.work, cb);
	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
	dim->event_ctr = 0;
	dim->packets = 0;
	dim->bytes = 0;
}
static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
{
	struct bcmgenet_net_dim *dim = &ring->dim;
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	/* If DIM was enabled, re-apply default parameters */
	if (dim->use_dim) {
		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}
/* Initialize a Tx ring along with corresponding hardware registers */
static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
				  unsigned int index, unsigned int size,
				  unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	u32 flow_period_val = 0;

	spin_lock_init(&ring->lock);
	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->queue = 0;
		ring->int_enable = bcmgenet_tx_ring16_int_enable;
		ring->int_disable = bcmgenet_tx_ring16_int_disable;
	} else {
		ring->queue = index + 1;
		ring->int_enable = bcmgenet_tx_ring_int_enable;
		ring->int_disable = bcmgenet_tx_ring_int_disable;
	}
	ring->cbs = priv->tx_cbs + start_ptr;
	ring->size = size;
	ring->clean_ptr = start_ptr;
	ring->c_index = 0;
	ring->free_bds = size;
	ring->write_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;
	ring->prod_index = 0;

	/* Set flow period for ring != 16 */
	if (index != DESC_INDEX)
		flow_period_val = ENET_MAX_MTU_SIZE << 16;

	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
	/* Disable rate control for now */
	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
				  TDMA_FLOW_PERIOD);
	bcmgenet_tdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);

	/* Set start and end address, read and write pointers */
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_READ_PTR);
	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  TDMA_WRITE_PTR);
	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	/* Initialize Tx NAPI */
	netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
			  NAPI_POLL_WEIGHT);
}
/* Initialize a RDMA ring */
static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
				 unsigned int index, unsigned int size,
				 unsigned int start_ptr, unsigned int end_ptr)
{
	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
	u32 words_per_bd = WORDS_PER_BD(priv);
	int ret;

	ring->priv = priv;
	ring->index = index;
	if (index == DESC_INDEX) {
		ring->int_enable = bcmgenet_rx_ring16_int_enable;
		ring->int_disable = bcmgenet_rx_ring16_int_disable;
	} else {
		ring->int_enable = bcmgenet_rx_ring_int_enable;
		ring->int_disable = bcmgenet_rx_ring_int_disable;
	}
	ring->cbs = priv->rx_cbs + start_ptr;
	ring->size = size;
	ring->c_index = 0;
	ring->read_ptr = start_ptr;
	ring->cb_ptr = start_ptr;
	ring->end_ptr = end_ptr - 1;

	ret = bcmgenet_alloc_rx_buffers(priv, ring);
	if (ret)
		return ret;

	bcmgenet_init_dim(ring, bcmgenet_dim_work);
	bcmgenet_init_rx_coalesce(ring);

	/* Initialize Rx NAPI */
	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
		       NAPI_POLL_WEIGHT);

	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
	bcmgenet_rdma_ring_writel(priv, index,
				  ((size << DMA_RING_SIZE_SHIFT) |
				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
	bcmgenet_rdma_ring_writel(priv, index,
				  (DMA_FC_THRESH_LO <<
				   DMA_XOFF_THRESHOLD_SHIFT) |
				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);

	/* Set start and end address, read and write pointers */
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  DMA_START_ADDR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_READ_PTR);
	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
				  RDMA_WRITE_PTR);
	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
				  DMA_END_ADDR);

	return ret;
}
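
/* Illustrative note (not from the original source): the RDMA_XON_XOFF_THRESH
 * write above packs two flow-control watermarks into a single register:
 * DMA_FC_THRESH_LO is shifted into the XOFF field via DMA_XOFF_THRESHOLD_SHIFT
 * and DMA_FC_THRESH_HI occupies the low-order XON field, so one register
 * update programs both thresholds consistently.
 */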
static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
}

static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		napi_disable(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
}

static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_tx_ring *ring;

	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
		ring = &priv->tx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->tx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
/* Initialize Tx queues
 *
 * Queues 0-3 are priority-based, each one has 32 descriptors,
 * with queue 0 being the highest priority queue.
 *
 * Queue 16 is the default Tx queue with
 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
 *
 * The transmit control block pool is then partitioned as follows:
 * - Tx queue 0 uses tx_cbs[0..31]
 * - Tx queue 1 uses tx_cbs[32..63]
 * - Tx queue 2 uses tx_cbs[64..95]
 * - Tx queue 3 uses tx_cbs[96..127]
 * - Tx queue 16 uses tx_cbs[128..255]
 */
static void bcmgenet_init_tx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i, dma_enable;
	u32 dma_ctrl, ring_cfg;
	u32 dma_priority[3] = {0, 0, 0};

	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Enable strict priority arbiter mode */
	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);

	/* Initialize Tx priority queues */
	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
				      i * priv->hw_params->tx_bds_per_q,
				      (i + 1) * priv->hw_params->tx_bds_per_q);
		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
	}

	/* Initialize Tx default queue 16 */
	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
			      priv->hw_params->tx_queues *
			      priv->hw_params->tx_bds_per_q,
			      TOTAL_DESC);
	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
		 DMA_PRIO_REG_SHIFT(DESC_INDEX));

	/* Set Tx queue priorities */
	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);

	/* Enable Tx queues */
	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Enable Tx DMA */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
}
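
/* Illustrative note (not from the original source): DMA_PRIO_REG_INDEX() and
 * DMA_PRIO_REG_SHIFT() scatter the per-queue priority fields across the three
 * DMA_PRIORITY_* registers written above. Assuming the usual layout of six
 * multi-bit fields per 32-bit register, queues 0-3 land in DMA_PRIORITY_0 and
 * queue 16 in DMA_PRIORITY_2; the authoritative field width and placement
 * come from the register definitions in bcmgenet.h, not from this comment.
 */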
static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_enable(&ring->napi);
		ring->int_enable(ring);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_enable(&ring->napi);
	ring->int_enable(ring);
}

static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		napi_disable(&ring->napi);
		cancel_work_sync(&ring->dim.dim.work);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	napi_disable(&ring->napi);
	cancel_work_sync(&ring->dim.dim.work);
}

static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
{
	unsigned int i;
	struct bcmgenet_rx_ring *ring;

	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
		ring = &priv->rx_rings[i];
		netif_napi_del(&ring->napi);
	}

	ring = &priv->rx_rings[DESC_INDEX];
	netif_napi_del(&ring->napi);
}
/* Initialize Rx queues
 *
 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
 * used to direct traffic to these queues.
 *
 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
 */
static int bcmgenet_init_rx_queues(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 i;
	u32 dma_enable;
	u32 dma_ctrl;
	u32 ring_cfg;
	int ret;

	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
	dma_enable = dma_ctrl & DMA_EN;
	dma_ctrl &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	dma_ctrl = 0;
	ring_cfg = 0;

	/* Initialize Rx priority queues */
	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ret = bcmgenet_init_rx_ring(priv, i,
					    priv->hw_params->rx_bds_per_q,
					    i * priv->hw_params->rx_bds_per_q,
					    (i + 1) *
					    priv->hw_params->rx_bds_per_q);
		if (ret)
			return ret;

		ring_cfg |= (1 << i);
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	}

	/* Initialize Rx default queue 16 */
	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
				    priv->hw_params->rx_queues *
				    priv->hw_params->rx_bds_per_q,
				    TOTAL_DESC);
	if (ret)
		return ret;

	ring_cfg |= (1 << DESC_INDEX);
	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));

	/* Enable rings */
	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);

	/* Configure ring as descriptor ring and re-enable DMA if enabled */
	if (dma_enable)
		dma_ctrl |= DMA_EN;
	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);

	return 0;
}
static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
{
	int ret = 0;
	int timeout = 0;
	u32 reg;
	u32 dma_ctrl;
	int i;

	/* Disable TDMA to stop add more frames in TX DMA */
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	/* Check TDMA status register to confirm TDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
		ret = -ETIMEDOUT;
	}

	/* Wait 10ms for packet drain in both tx and rx dma */
	usleep_range(10000, 20000);

	/* Disable RDMA */
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~DMA_EN;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	timeout = 0;
	/* Check RDMA status register to confirm RDMA is disabled */
	while (timeout++ < DMA_TIMEOUT_VAL) {
		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
		if (reg & DMA_DISABLED)
			break;

		udelay(1);
	}

	if (timeout == DMA_TIMEOUT_VAL) {
		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
		ret = -ETIMEDOUT;
	}

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	dma_ctrl = 0;
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	return ret;
}
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
	struct netdev_queue *txq;
	int i;

	bcmgenet_fini_rx_napi(priv);
	bcmgenet_fini_tx_napi(priv);

	for (i = 0; i < priv->num_tx_bds; i++)
		dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
						  priv->tx_cbs + i));

	for (i = 0; i < priv->hw_params->tx_queues; i++) {
		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
		netdev_tx_reset_queue(txq);
	}

	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
	netdev_tx_reset_queue(txq);

	bcmgenet_free_rx_buffers(priv);
	kfree(priv->rx_cbs);
	kfree(priv->tx_cbs);
}
/* init_edma: Initialize DMA control register */
static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
{
	int ret;
	unsigned int i;
	struct enet_cb *cb;

	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);

	/* Initialize common Rx ring structures */
	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
	priv->num_rx_bds = TOTAL_DESC;
	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->rx_cbs)
		return -ENOMEM;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = priv->rx_cbs + i;
		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
	}

	/* Initialize common TX ring structures */
	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
	priv->num_tx_bds = TOTAL_DESC;
	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
			       GFP_KERNEL);
	if (!priv->tx_cbs) {
		kfree(priv->rx_cbs);
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_tx_bds; i++) {
		cb = priv->tx_cbs + i;
		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
	}

	/* Init rDma */
	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
			     DMA_SCB_BURST_SIZE);

	/* Initialize Rx queues */
	ret = bcmgenet_init_rx_queues(priv->dev);
	if (ret) {
		netdev_err(priv->dev, "failed to initialize Rx queues\n");
		bcmgenet_free_rx_buffers(priv);
		kfree(priv->rx_cbs);
		kfree(priv->tx_cbs);
		return ret;
	}

	/* Init tDma */
	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
			     DMA_SCB_BURST_SIZE);

	/* Initialize Tx queues */
	bcmgenet_init_tx_queues(priv->dev);

	return 0;
}
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
	unsigned int status;
	struct bcmgenet_priv *priv = container_of(
			work, struct bcmgenet_priv, bcmgenet_irq_work);

	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

	spin_lock_irq(&priv->lock);
	status = priv->irq0_stat;
	priv->irq0_stat = 0;
	spin_unlock_irq(&priv->lock);

	if (status & UMAC_IRQ_PHY_DET_R &&
	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
		phy_init_hw(priv->dev->phydev);
		genphy_config_aneg(priv->dev->phydev);
	}

	/* Link UP/DOWN event */
	if (status & UMAC_IRQ_LINK_EVENT)
		phy_mac_interrupt(priv->dev->phydev);
}
3077 /* bcmgenet_isr1: handle Rx and Tx priority queues */
3078 static irqreturn_t
bcmgenet_isr1(int irq
, void *dev_id
)
3080 struct bcmgenet_priv
*priv
= dev_id
;
3081 struct bcmgenet_rx_ring
*rx_ring
;
3082 struct bcmgenet_tx_ring
*tx_ring
;
3083 unsigned int index
, status
;
3085 /* Read irq status */
3086 status
= bcmgenet_intrl2_1_readl(priv
, INTRL2_CPU_STAT
) &
3087 ~bcmgenet_intrl2_1_readl(priv
, INTRL2_CPU_MASK_STATUS
);
3089 /* clear interrupts */
3090 bcmgenet_intrl2_1_writel(priv
, status
, INTRL2_CPU_CLEAR
);
3092 netif_dbg(priv
, intr
, priv
->dev
,
3093 "%s: IRQ=0x%x\n", __func__
, status
);
3095 /* Check Rx priority queue interrupts */
3096 for (index
= 0; index
< priv
->hw_params
->rx_queues
; index
++) {
3097 if (!(status
& BIT(UMAC_IRQ1_RX_INTR_SHIFT
+ index
)))
3100 rx_ring
= &priv
->rx_rings
[index
];
3101 rx_ring
->dim
.event_ctr
++;
3103 if (likely(napi_schedule_prep(&rx_ring
->napi
))) {
3104 rx_ring
->int_disable(rx_ring
);
3105 __napi_schedule_irqoff(&rx_ring
->napi
);
3109 /* Check Tx priority queue interrupts */
3110 for (index
= 0; index
< priv
->hw_params
->tx_queues
; index
++) {
3111 if (!(status
& BIT(index
)))
3114 tx_ring
= &priv
->tx_rings
[index
];
3116 if (likely(napi_schedule_prep(&tx_ring
->napi
))) {
3117 tx_ring
->int_disable(tx_ring
);
3118 __napi_schedule_irqoff(&tx_ring
->napi
);
3125 /* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
3126 static irqreturn_t
bcmgenet_isr0(int irq
, void *dev_id
)
3128 struct bcmgenet_priv
*priv
= dev_id
;
3129 struct bcmgenet_rx_ring
*rx_ring
;
3130 struct bcmgenet_tx_ring
*tx_ring
;
3131 unsigned int status
;
3132 unsigned long flags
;
3134 /* Read irq status */
3135 status
= bcmgenet_intrl2_0_readl(priv
, INTRL2_CPU_STAT
) &
3136 ~bcmgenet_intrl2_0_readl(priv
, INTRL2_CPU_MASK_STATUS
);
3138 /* clear interrupts */
3139 bcmgenet_intrl2_0_writel(priv
, status
, INTRL2_CPU_CLEAR
);
3141 netif_dbg(priv
, intr
, priv
->dev
,
3142 "IRQ=0x%x\n", status
);
3144 if (status
& UMAC_IRQ_RXDMA_DONE
) {
3145 rx_ring
= &priv
->rx_rings
[DESC_INDEX
];
3146 rx_ring
->dim
.event_ctr
++;
3148 if (likely(napi_schedule_prep(&rx_ring
->napi
))) {
3149 rx_ring
->int_disable(rx_ring
);
3150 __napi_schedule_irqoff(&rx_ring
->napi
);
3154 if (status
& UMAC_IRQ_TXDMA_DONE
) {
3155 tx_ring
= &priv
->tx_rings
[DESC_INDEX
];
3157 if (likely(napi_schedule_prep(&tx_ring
->napi
))) {
3158 tx_ring
->int_disable(tx_ring
);
3159 __napi_schedule_irqoff(&tx_ring
->napi
);
3163 if ((priv
->hw_params
->flags
& GENET_HAS_MDIO_INTR
) &&
3164 status
& (UMAC_IRQ_MDIO_DONE
| UMAC_IRQ_MDIO_ERROR
)) {
3168 /* all other interested interrupts handled in bottom half */
3169 status
&= (UMAC_IRQ_LINK_EVENT
| UMAC_IRQ_PHY_DET_R
);
3171 /* Save irq status for bottom-half processing. */
3172 spin_lock_irqsave(&priv
->lock
, flags
);
3173 priv
->irq0_stat
|= status
;
3174 spin_unlock_irqrestore(&priv
->lock
, flags
);
3176 schedule_work(&priv
->bcmgenet_irq_work
);
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	/* Acknowledge the interrupt */
	return IRQ_HANDLED;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcmgenet_poll_controller(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Invoke the main RX/TX interrupt handler */
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);

	/* And the interrupt handler for RX/TX priority queues */
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif
3205 static void bcmgenet_umac_reset(struct bcmgenet_priv
*priv
)
3209 reg
= bcmgenet_rbuf_ctrl_get(priv
);
3211 bcmgenet_rbuf_ctrl_set(priv
, reg
);
3215 bcmgenet_rbuf_ctrl_set(priv
, reg
);
3219 static void bcmgenet_set_hw_addr(struct bcmgenet_priv
*priv
,
3220 unsigned char *addr
)
3222 bcmgenet_umac_writel(priv
, get_unaligned_be32(&addr
[0]), UMAC_MAC0
);
3223 bcmgenet_umac_writel(priv
, get_unaligned_be16(&addr
[4]), UMAC_MAC1
);
3226 static void bcmgenet_get_hw_addr(struct bcmgenet_priv
*priv
,
3227 unsigned char *addr
)
3231 addr_tmp
= bcmgenet_umac_readl(priv
, UMAC_MAC0
);
3232 put_unaligned_be32(addr_tmp
, &addr
[0]);
3233 addr_tmp
= bcmgenet_umac_readl(priv
, UMAC_MAC1
);
3234 put_unaligned_be16(addr_tmp
, &addr
[4]);
3237 /* Returns a reusable dma control register value */
3238 static u32
bcmgenet_dma_disable(struct bcmgenet_priv
*priv
)
3244 dma_ctrl
= 1 << (DESC_INDEX
+ DMA_RING_BUF_EN_SHIFT
) | DMA_EN
;
3245 reg
= bcmgenet_tdma_readl(priv
, DMA_CTRL
);
3247 bcmgenet_tdma_writel(priv
, reg
, DMA_CTRL
);
3249 reg
= bcmgenet_rdma_readl(priv
, DMA_CTRL
);
3251 bcmgenet_rdma_writel(priv
, reg
, DMA_CTRL
);
3253 bcmgenet_umac_writel(priv
, 1, UMAC_TX_FLUSH
);
3255 bcmgenet_umac_writel(priv
, 0, UMAC_TX_FLUSH
);
3260 static void bcmgenet_enable_dma(struct bcmgenet_priv
*priv
, u32 dma_ctrl
)
3264 reg
= bcmgenet_rdma_readl(priv
, DMA_CTRL
);
3266 bcmgenet_rdma_writel(priv
, reg
, DMA_CTRL
);
3268 reg
= bcmgenet_tdma_readl(priv
, DMA_CTRL
);
3270 bcmgenet_tdma_writel(priv
, reg
, DMA_CTRL
);
3273 static void bcmgenet_netif_start(struct net_device
*dev
)
3275 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
3277 /* Start the network engine */
3278 bcmgenet_set_rx_mode(dev
);
3279 bcmgenet_enable_rx_napi(priv
);
3281 umac_enable_set(priv
, CMD_TX_EN
| CMD_RX_EN
, true);
3283 bcmgenet_enable_tx_napi(priv
);
3285 /* Monitor link interrupts now */
3286 bcmgenet_link_intr_enable(priv
);
3288 phy_start(dev
->phydev
);
3291 static int bcmgenet_open(struct net_device
*dev
)
3293 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
3294 unsigned long dma_ctrl
;
3298 netif_dbg(priv
, ifup
, dev
, "bcmgenet_open\n");
3300 /* Turn on the clock */
3301 clk_prepare_enable(priv
->clk
);
3303 /* If this is an internal GPHY, power it back on now, before UniMAC is
3304 * brought out of reset as absolutely no UniMAC activity is allowed
3306 if (priv
->internal_phy
)
3307 bcmgenet_power_up(priv
, GENET_POWER_PASSIVE
);
3309 /* take MAC out of reset */
3310 bcmgenet_umac_reset(priv
);
3314 /* Apply features again in case we changed them while interface was
3317 bcmgenet_set_features(dev
, dev
->features
);
3319 bcmgenet_set_hw_addr(priv
, dev
->dev_addr
);
3321 if (priv
->internal_phy
) {
3322 reg
= bcmgenet_ext_readl(priv
, EXT_EXT_PWR_MGMT
);
3323 reg
|= EXT_ENERGY_DET_MASK
;
3324 bcmgenet_ext_writel(priv
, reg
, EXT_EXT_PWR_MGMT
);
3327 /* Disable RX/TX DMA and flush TX queues */
3328 dma_ctrl
= bcmgenet_dma_disable(priv
);
3330 /* Reinitialize TDMA and RDMA and SW housekeeping */
3331 ret
= bcmgenet_init_dma(priv
);
3333 netdev_err(dev
, "failed to initialize DMA\n");
3334 goto err_clk_disable
;
3337 /* Always enable ring 16 - descriptor ring */
3338 bcmgenet_enable_dma(priv
, dma_ctrl
);
3341 bcmgenet_hfb_init(priv
);
3343 ret
= request_irq(priv
->irq0
, bcmgenet_isr0
, IRQF_SHARED
,
3346 netdev_err(dev
, "can't request IRQ %d\n", priv
->irq0
);
3350 ret
= request_irq(priv
->irq1
, bcmgenet_isr1
, IRQF_SHARED
,
3353 netdev_err(dev
, "can't request IRQ %d\n", priv
->irq1
);
3357 ret
= bcmgenet_mii_probe(dev
);
3359 netdev_err(dev
, "failed to connect to PHY\n");
3363 bcmgenet_netif_start(dev
);
3365 netif_tx_start_all_queues(dev
);
3370 free_irq(priv
->irq1
, priv
);
3372 free_irq(priv
->irq0
, priv
);
3374 bcmgenet_dma_teardown(priv
);
3375 bcmgenet_fini_dma(priv
);
3377 if (priv
->internal_phy
)
3378 bcmgenet_power_down(priv
, GENET_POWER_PASSIVE
);
3379 clk_disable_unprepare(priv
->clk
);
3383 static void bcmgenet_netif_stop(struct net_device
*dev
)
3385 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
3387 bcmgenet_disable_tx_napi(priv
);
3388 netif_tx_disable(dev
);
3390 /* Disable MAC receive */
3391 umac_enable_set(priv
, CMD_RX_EN
, false);
3393 bcmgenet_dma_teardown(priv
);
3395 /* Disable MAC transmit. TX DMA disabled must be done before this */
3396 umac_enable_set(priv
, CMD_TX_EN
, false);
3398 phy_stop(dev
->phydev
);
3399 bcmgenet_disable_rx_napi(priv
);
3400 bcmgenet_intr_disable(priv
);
3402 /* Wait for pending work items to complete. Since interrupts are
3403 * disabled no new work will be scheduled.
3405 cancel_work_sync(&priv
->bcmgenet_irq_work
);
3407 priv
->old_link
= -1;
3408 priv
->old_speed
= -1;
3409 priv
->old_duplex
= -1;
3410 priv
->old_pause
= -1;
3413 bcmgenet_tx_reclaim_all(dev
);
3414 bcmgenet_fini_dma(priv
);
3417 static int bcmgenet_close(struct net_device
*dev
)
3419 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
3422 netif_dbg(priv
, ifdown
, dev
, "bcmgenet_close\n");
3424 bcmgenet_netif_stop(dev
);
3426 /* Really kill the PHY state machine and disconnect from it */
3427 phy_disconnect(dev
->phydev
);
3429 free_irq(priv
->irq0
, priv
);
3430 free_irq(priv
->irq1
, priv
);
3432 if (priv
->internal_phy
)
3433 ret
= bcmgenet_power_down(priv
, GENET_POWER_PASSIVE
);
3435 clk_disable_unprepare(priv
->clk
);
3440 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring
*ring
)
3442 struct bcmgenet_priv
*priv
= ring
->priv
;
3443 u32 p_index
, c_index
, intsts
, intmsk
;
3444 struct netdev_queue
*txq
;
3445 unsigned int free_bds
;
3448 if (!netif_msg_tx_err(priv
))
3451 txq
= netdev_get_tx_queue(priv
->dev
, ring
->queue
);
3453 spin_lock(&ring
->lock
);
3454 if (ring
->index
== DESC_INDEX
) {
3455 intsts
= ~bcmgenet_intrl2_0_readl(priv
, INTRL2_CPU_MASK_STATUS
);
3456 intmsk
= UMAC_IRQ_TXDMA_DONE
| UMAC_IRQ_TXDMA_MBDONE
;
3458 intsts
= ~bcmgenet_intrl2_1_readl(priv
, INTRL2_CPU_MASK_STATUS
);
3459 intmsk
= 1 << ring
->index
;
3461 c_index
= bcmgenet_tdma_ring_readl(priv
, ring
->index
, TDMA_CONS_INDEX
);
3462 p_index
= bcmgenet_tdma_ring_readl(priv
, ring
->index
, TDMA_PROD_INDEX
);
3463 txq_stopped
= netif_tx_queue_stopped(txq
);
3464 free_bds
= ring
->free_bds
;
3465 spin_unlock(&ring
->lock
);
3467 netif_err(priv
, tx_err
, priv
->dev
, "Ring %d queue %d status summary\n"
3468 "TX queue status: %s, interrupts: %s\n"
3469 "(sw)free_bds: %d (sw)size: %d\n"
3470 "(sw)p_index: %d (hw)p_index: %d\n"
3471 "(sw)c_index: %d (hw)c_index: %d\n"
3472 "(sw)clean_p: %d (sw)write_p: %d\n"
3473 "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
3474 ring
->index
, ring
->queue
,
3475 txq_stopped
? "stopped" : "active",
3476 intsts
& intmsk
? "enabled" : "disabled",
3477 free_bds
, ring
->size
,
3478 ring
->prod_index
, p_index
& DMA_P_INDEX_MASK
,
3479 ring
->c_index
, c_index
& DMA_C_INDEX_MASK
,
3480 ring
->clean_ptr
, ring
->write_ptr
,
3481 ring
->cb_ptr
, ring
->end_ptr
);
3484 static void bcmgenet_timeout(struct net_device
*dev
, unsigned int txqueue
)
3486 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
3487 u32 int0_enable
= 0;
3488 u32 int1_enable
= 0;
3491 netif_dbg(priv
, tx_err
, dev
, "bcmgenet_timeout\n");
3493 for (q
= 0; q
< priv
->hw_params
->tx_queues
; q
++)
3494 bcmgenet_dump_tx_queue(&priv
->tx_rings
[q
]);
3495 bcmgenet_dump_tx_queue(&priv
->tx_rings
[DESC_INDEX
]);
3497 bcmgenet_tx_reclaim_all(dev
);
3499 for (q
= 0; q
< priv
->hw_params
->tx_queues
; q
++)
3500 int1_enable
|= (1 << q
);
3502 int0_enable
= UMAC_IRQ_TXDMA_DONE
;
3504 /* Re-enable TX interrupts if disabled */
3505 bcmgenet_intrl2_0_writel(priv
, int0_enable
, INTRL2_CPU_MASK_CLEAR
);
3506 bcmgenet_intrl2_1_writel(priv
, int1_enable
, INTRL2_CPU_MASK_CLEAR
);
3508 netif_trans_update(dev
);
3510 dev
->stats
.tx_errors
++;
3512 netif_tx_wake_all_queues(dev
);
3515 #define MAX_MDF_FILTER 17
3517 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv
*priv
,
3518 unsigned char *addr
,
3521 bcmgenet_umac_writel(priv
, addr
[0] << 8 | addr
[1],
3522 UMAC_MDF_ADDR
+ (*i
* 4));
3523 bcmgenet_umac_writel(priv
, addr
[2] << 24 | addr
[3] << 16 |
3524 addr
[4] << 8 | addr
[5],
3525 UMAC_MDF_ADDR
+ ((*i
+ 1) * 4));
3529 static void bcmgenet_set_rx_mode(struct net_device
*dev
)
3531 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
3532 struct netdev_hw_addr
*ha
;
3536 netif_dbg(priv
, hw
, dev
, "%s: %08X\n", __func__
, dev
->flags
);
3538 /* Number of filters needed */
3539 nfilter
= netdev_uc_count(dev
) + netdev_mc_count(dev
) + 2;
3542 * Turn on promicuous mode for three scenarios
3543 * 1. IFF_PROMISC flag is set
3544 * 2. IFF_ALLMULTI flag is set
3545 * 3. The number of filters needed exceeds the number filters
3546 * supported by the hardware.
3548 reg
= bcmgenet_umac_readl(priv
, UMAC_CMD
);
3549 if ((dev
->flags
& (IFF_PROMISC
| IFF_ALLMULTI
)) ||
3550 (nfilter
> MAX_MDF_FILTER
)) {
3552 bcmgenet_umac_writel(priv
, reg
, UMAC_CMD
);
3553 bcmgenet_umac_writel(priv
, 0, UMAC_MDF_CTRL
);
3556 reg
&= ~CMD_PROMISC
;
3557 bcmgenet_umac_writel(priv
, reg
, UMAC_CMD
);
3560 /* update MDF filter */
3563 bcmgenet_set_mdf_addr(priv
, dev
->broadcast
, &i
);
3564 /* my own address.*/
3565 bcmgenet_set_mdf_addr(priv
, dev
->dev_addr
, &i
);
3568 netdev_for_each_uc_addr(ha
, dev
)
3569 bcmgenet_set_mdf_addr(priv
, ha
->addr
, &i
);
3572 netdev_for_each_mc_addr(ha
, dev
)
3573 bcmgenet_set_mdf_addr(priv
, ha
->addr
, &i
);
3575 /* Enable filters */
3576 reg
= GENMASK(MAX_MDF_FILTER
- 1, MAX_MDF_FILTER
- nfilter
);
3577 bcmgenet_umac_writel(priv
, reg
, UMAC_MDF_CTRL
);
3580 /* Set the hardware MAC address. */
3581 static int bcmgenet_set_mac_addr(struct net_device
*dev
, void *p
)
3583 struct sockaddr
*addr
= p
;
3585 /* Setting the MAC address at the hardware level is not possible
3586 * without disabling the UniMAC RX/TX enable bits.
3588 if (netif_running(dev
))
3591 ether_addr_copy(dev
->dev_addr
, addr
->sa_data
);
3596 static struct net_device_stats
*bcmgenet_get_stats(struct net_device
*dev
)
3598 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
3599 unsigned long tx_bytes
= 0, tx_packets
= 0;
3600 unsigned long rx_bytes
= 0, rx_packets
= 0;
3601 unsigned long rx_errors
= 0, rx_dropped
= 0;
3602 struct bcmgenet_tx_ring
*tx_ring
;
3603 struct bcmgenet_rx_ring
*rx_ring
;
3606 for (q
= 0; q
< priv
->hw_params
->tx_queues
; q
++) {
3607 tx_ring
= &priv
->tx_rings
[q
];
3608 tx_bytes
+= tx_ring
->bytes
;
3609 tx_packets
+= tx_ring
->packets
;
3611 tx_ring
= &priv
->tx_rings
[DESC_INDEX
];
3612 tx_bytes
+= tx_ring
->bytes
;
3613 tx_packets
+= tx_ring
->packets
;
3615 for (q
= 0; q
< priv
->hw_params
->rx_queues
; q
++) {
3616 rx_ring
= &priv
->rx_rings
[q
];
3618 rx_bytes
+= rx_ring
->bytes
;
3619 rx_packets
+= rx_ring
->packets
;
3620 rx_errors
+= rx_ring
->errors
;
3621 rx_dropped
+= rx_ring
->dropped
;
3623 rx_ring
= &priv
->rx_rings
[DESC_INDEX
];
3624 rx_bytes
+= rx_ring
->bytes
;
3625 rx_packets
+= rx_ring
->packets
;
3626 rx_errors
+= rx_ring
->errors
;
3627 rx_dropped
+= rx_ring
->dropped
;
3629 dev
->stats
.tx_bytes
= tx_bytes
;
3630 dev
->stats
.tx_packets
= tx_packets
;
3631 dev
->stats
.rx_bytes
= rx_bytes
;
3632 dev
->stats
.rx_packets
= rx_packets
;
3633 dev
->stats
.rx_errors
= rx_errors
;
3634 dev
->stats
.rx_missed_errors
= rx_errors
;
3635 dev
->stats
.rx_dropped
= rx_dropped
;
3639 static int bcmgenet_change_carrier(struct net_device
*dev
, bool new_carrier
)
3641 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
3643 if (!dev
->phydev
|| !phy_is_pseudo_fixed_link(dev
->phydev
) ||
3644 priv
->phy_interface
!= PHY_INTERFACE_MODE_MOCA
)
3648 netif_carrier_on(dev
);
3650 netif_carrier_off(dev
);
3655 static const struct net_device_ops bcmgenet_netdev_ops
= {
3656 .ndo_open
= bcmgenet_open
,
3657 .ndo_stop
= bcmgenet_close
,
3658 .ndo_start_xmit
= bcmgenet_xmit
,
3659 .ndo_tx_timeout
= bcmgenet_timeout
,
3660 .ndo_set_rx_mode
= bcmgenet_set_rx_mode
,
3661 .ndo_set_mac_address
= bcmgenet_set_mac_addr
,
3662 .ndo_do_ioctl
= phy_do_ioctl_running
,
3663 .ndo_set_features
= bcmgenet_set_features
,
3664 #ifdef CONFIG_NET_POLL_CONTROLLER
3665 .ndo_poll_controller
= bcmgenet_poll_controller
,
3667 .ndo_get_stats
= bcmgenet_get_stats
,
3668 .ndo_change_carrier
= bcmgenet_change_carrier
,
3671 /* Array of GENET hardware parameters/characteristics */
3672 static struct bcmgenet_hw_params bcmgenet_hw_params
[] = {
3678 .bp_in_en_shift
= 16,
3679 .bp_in_mask
= 0xffff,
3680 .hfb_filter_cnt
= 16,
3682 .hfb_offset
= 0x1000,
3683 .rdma_offset
= 0x2000,
3684 .tdma_offset
= 0x3000,
3692 .bp_in_en_shift
= 16,
3693 .bp_in_mask
= 0xffff,
3694 .hfb_filter_cnt
= 16,
3696 .tbuf_offset
= 0x0600,
3697 .hfb_offset
= 0x1000,
3698 .hfb_reg_offset
= 0x2000,
3699 .rdma_offset
= 0x3000,
3700 .tdma_offset
= 0x4000,
3702 .flags
= GENET_HAS_EXT
,
3709 .bp_in_en_shift
= 17,
3710 .bp_in_mask
= 0x1ffff,
3711 .hfb_filter_cnt
= 48,
3712 .hfb_filter_size
= 128,
3714 .tbuf_offset
= 0x0600,
3715 .hfb_offset
= 0x8000,
3716 .hfb_reg_offset
= 0xfc00,
3717 .rdma_offset
= 0x10000,
3718 .tdma_offset
= 0x11000,
3720 .flags
= GENET_HAS_EXT
| GENET_HAS_MDIO_INTR
|
3721 GENET_HAS_MOCA_LINK_DET
,
3728 .bp_in_en_shift
= 17,
3729 .bp_in_mask
= 0x1ffff,
3730 .hfb_filter_cnt
= 48,
3731 .hfb_filter_size
= 128,
3733 .tbuf_offset
= 0x0600,
3734 .hfb_offset
= 0x8000,
3735 .hfb_reg_offset
= 0xfc00,
3736 .rdma_offset
= 0x2000,
3737 .tdma_offset
= 0x4000,
3739 .flags
= GENET_HAS_40BITS
| GENET_HAS_EXT
|
3740 GENET_HAS_MDIO_INTR
| GENET_HAS_MOCA_LINK_DET
,
3747 .bp_in_en_shift
= 17,
3748 .bp_in_mask
= 0x1ffff,
3749 .hfb_filter_cnt
= 48,
3750 .hfb_filter_size
= 128,
3752 .tbuf_offset
= 0x0600,
3753 .hfb_offset
= 0x8000,
3754 .hfb_reg_offset
= 0xfc00,
3755 .rdma_offset
= 0x2000,
3756 .tdma_offset
= 0x4000,
3758 .flags
= GENET_HAS_40BITS
| GENET_HAS_EXT
|
3759 GENET_HAS_MDIO_INTR
| GENET_HAS_MOCA_LINK_DET
,
3763 /* Infer hardware parameters from the detected GENET version */
3764 static void bcmgenet_set_hw_params(struct bcmgenet_priv
*priv
)
3766 struct bcmgenet_hw_params
*params
;
3771 if (GENET_IS_V5(priv
) || GENET_IS_V4(priv
)) {
3772 bcmgenet_dma_regs
= bcmgenet_dma_regs_v3plus
;
3773 genet_dma_ring_regs
= genet_dma_ring_regs_v4
;
3774 } else if (GENET_IS_V3(priv
)) {
3775 bcmgenet_dma_regs
= bcmgenet_dma_regs_v3plus
;
3776 genet_dma_ring_regs
= genet_dma_ring_regs_v123
;
3777 } else if (GENET_IS_V2(priv
)) {
3778 bcmgenet_dma_regs
= bcmgenet_dma_regs_v2
;
3779 genet_dma_ring_regs
= genet_dma_ring_regs_v123
;
3780 } else if (GENET_IS_V1(priv
)) {
3781 bcmgenet_dma_regs
= bcmgenet_dma_regs_v1
;
3782 genet_dma_ring_regs
= genet_dma_ring_regs_v123
;
3785 /* enum genet_version starts at 1 */
3786 priv
->hw_params
= &bcmgenet_hw_params
[priv
->version
];
3787 params
= priv
->hw_params
;
3789 /* Read GENET HW version */
3790 reg
= bcmgenet_sys_readl(priv
, SYS_REV_CTRL
);
3791 major
= (reg
>> 24 & 0x0f);
3794 else if (major
== 5)
3796 else if (major
== 0)
3798 if (major
!= priv
->version
) {
3799 dev_err(&priv
->pdev
->dev
,
3800 "GENET version mismatch, got: %d, configured for: %d\n",
3801 major
, priv
->version
);
3804 /* Print the GENET core version */
3805 dev_info(&priv
->pdev
->dev
, "GENET " GENET_VER_FMT
,
3806 major
, (reg
>> 16) & 0x0f, reg
& 0xffff);
3808 /* Store the integrated PHY revision for the MDIO probing function
3809 * to pass this information to the PHY driver. The PHY driver expects
3810 * to find the PHY major revision in bits 15:8 while the GENET register
3811 * stores that information in bits 7:0, account for that.
3813 * On newer chips, starting with PHY revision G0, a new scheme is
3814 * deployed similar to the Starfighter 2 switch with GPHY major
3815 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3816 * is reserved as well as special value 0x01ff, we have a small
3817 * heuristic to check for the new GPHY revision and re-arrange things
3818 * so the GPHY driver is happy.
3820 gphy_rev
= reg
& 0xffff;
3822 if (GENET_IS_V5(priv
)) {
3823 /* The EPHY revision should come from the MDIO registers of
3824 * the PHY not from GENET.
3826 if (gphy_rev
!= 0) {
3827 pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
3830 /* This is reserved so should require special treatment */
3831 } else if (gphy_rev
== 0 || gphy_rev
== 0x01ff) {
3832 pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev
);
3834 /* This is the good old scheme, just GPHY major, no minor nor patch */
3835 } else if ((gphy_rev
& 0xf0) != 0) {
3836 priv
->gphy_rev
= gphy_rev
<< 8;
3837 /* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3838 } else if ((gphy_rev
& 0xff00) != 0) {
3839 priv
->gphy_rev
= gphy_rev
;
3842 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3843 if (!(params
->flags
& GENET_HAS_40BITS
))
3844 pr_warn("GENET does not support 40-bits PA\n");
3847 pr_debug("Configuration for version: %d\n"
3848 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3849 "BP << en: %2d, BP msk: 0x%05x\n"
3850 "HFB count: %2d, QTAQ msk: 0x%05x\n"
3851 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3852 "RDMA: 0x%05x, TDMA: 0x%05x\n"
3855 params
->tx_queues
, params
->tx_bds_per_q
,
3856 params
->rx_queues
, params
->rx_bds_per_q
,
3857 params
->bp_in_en_shift
, params
->bp_in_mask
,
3858 params
->hfb_filter_cnt
, params
->qtag_mask
,
3859 params
->tbuf_offset
, params
->hfb_offset
,
3860 params
->hfb_reg_offset
,
3861 params
->rdma_offset
, params
->tdma_offset
,
3862 params
->words_per_bd
);
3865 struct bcmgenet_plat_data
{
3866 enum bcmgenet_version version
;
3867 u32 dma_max_burst_length
;
3870 static const struct bcmgenet_plat_data v1_plat_data
= {
3871 .version
= GENET_V1
,
3872 .dma_max_burst_length
= DMA_MAX_BURST_LENGTH
,
3875 static const struct bcmgenet_plat_data v2_plat_data
= {
3876 .version
= GENET_V2
,
3877 .dma_max_burst_length
= DMA_MAX_BURST_LENGTH
,
3880 static const struct bcmgenet_plat_data v3_plat_data
= {
3881 .version
= GENET_V3
,
3882 .dma_max_burst_length
= DMA_MAX_BURST_LENGTH
,
3885 static const struct bcmgenet_plat_data v4_plat_data
= {
3886 .version
= GENET_V4
,
3887 .dma_max_burst_length
= DMA_MAX_BURST_LENGTH
,
3890 static const struct bcmgenet_plat_data v5_plat_data
= {
3891 .version
= GENET_V5
,
3892 .dma_max_burst_length
= DMA_MAX_BURST_LENGTH
,
3895 static const struct bcmgenet_plat_data bcm2711_plat_data
= {
3896 .version
= GENET_V5
,
3897 .dma_max_burst_length
= 0x08,
3900 static const struct of_device_id bcmgenet_match
[] = {
3901 { .compatible
= "brcm,genet-v1", .data
= &v1_plat_data
},
3902 { .compatible
= "brcm,genet-v2", .data
= &v2_plat_data
},
3903 { .compatible
= "brcm,genet-v3", .data
= &v3_plat_data
},
3904 { .compatible
= "brcm,genet-v4", .data
= &v4_plat_data
},
3905 { .compatible
= "brcm,genet-v5", .data
= &v5_plat_data
},
3906 { .compatible
= "brcm,bcm2711-genet-v5", .data
= &bcm2711_plat_data
},
3909 MODULE_DEVICE_TABLE(of
, bcmgenet_match
);
3911 static int bcmgenet_probe(struct platform_device
*pdev
)
3913 struct bcmgenet_platform_data
*pd
= pdev
->dev
.platform_data
;
3914 const struct bcmgenet_plat_data
*pdata
;
3915 struct bcmgenet_priv
*priv
;
3916 struct net_device
*dev
;
3920 /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
3921 dev
= alloc_etherdev_mqs(sizeof(*priv
), GENET_MAX_MQ_CNT
+ 1,
3922 GENET_MAX_MQ_CNT
+ 1);
3924 dev_err(&pdev
->dev
, "can't allocate net device\n");
3928 priv
= netdev_priv(dev
);
3929 priv
->irq0
= platform_get_irq(pdev
, 0);
3930 if (priv
->irq0
< 0) {
3934 priv
->irq1
= platform_get_irq(pdev
, 1);
3935 if (priv
->irq1
< 0) {
3939 priv
->wol_irq
= platform_get_irq_optional(pdev
, 2);
3941 priv
->base
= devm_platform_ioremap_resource(pdev
, 0);
3942 if (IS_ERR(priv
->base
)) {
3943 err
= PTR_ERR(priv
->base
);
3947 spin_lock_init(&priv
->lock
);
3949 SET_NETDEV_DEV(dev
, &pdev
->dev
);
3950 dev_set_drvdata(&pdev
->dev
, dev
);
3951 dev
->watchdog_timeo
= 2 * HZ
;
3952 dev
->ethtool_ops
= &bcmgenet_ethtool_ops
;
3953 dev
->netdev_ops
= &bcmgenet_netdev_ops
;
3955 priv
->msg_enable
= netif_msg_init(-1, GENET_MSG_DEFAULT
);
3957 /* Set default features */
3958 dev
->features
|= NETIF_F_SG
| NETIF_F_HIGHDMA
| NETIF_F_HW_CSUM
|
3960 dev
->hw_features
|= dev
->features
;
3961 dev
->vlan_features
|= dev
->features
;
3963 /* Request the WOL interrupt and advertise suspend if available */
3964 priv
->wol_irq_disabled
= true;
3965 err
= devm_request_irq(&pdev
->dev
, priv
->wol_irq
, bcmgenet_wol_isr
, 0,
3968 device_set_wakeup_capable(&pdev
->dev
, 1);
3970 /* Set the needed headroom to account for any possible
3971 * features enabling/disabling at runtime
3973 dev
->needed_headroom
+= 64;
3975 netdev_boot_setup_check(dev
);
3980 pdata
= device_get_match_data(&pdev
->dev
);
3982 priv
->version
= pdata
->version
;
3983 priv
->dma_max_burst_length
= pdata
->dma_max_burst_length
;
3985 priv
->version
= pd
->genet_version
;
3986 priv
->dma_max_burst_length
= DMA_MAX_BURST_LENGTH
;
3989 priv
->clk
= devm_clk_get_optional(&priv
->pdev
->dev
, "enet");
3990 if (IS_ERR(priv
->clk
)) {
3991 dev_dbg(&priv
->pdev
->dev
, "failed to get enet clock\n");
3992 err
= PTR_ERR(priv
->clk
);
3996 err
= clk_prepare_enable(priv
->clk
);
4000 bcmgenet_set_hw_params(priv
);
4003 if (priv
->hw_params
->flags
& GENET_HAS_40BITS
)
4004 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(40));
4006 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
4008 goto err_clk_disable
;
4010 /* Mii wait queue */
4011 init_waitqueue_head(&priv
->wq
);
4012 /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
4013 priv
->rx_buf_len
= RX_BUF_LENGTH
;
4014 INIT_WORK(&priv
->bcmgenet_irq_work
, bcmgenet_irq_task
);
4016 priv
->clk_wol
= devm_clk_get_optional(&priv
->pdev
->dev
, "enet-wol");
4017 if (IS_ERR(priv
->clk_wol
)) {
4018 dev_dbg(&priv
->pdev
->dev
, "failed to get enet-wol clock\n");
4019 err
= PTR_ERR(priv
->clk_wol
);
4020 goto err_clk_disable
;
4023 priv
->clk_eee
= devm_clk_get_optional(&priv
->pdev
->dev
, "enet-eee");
4024 if (IS_ERR(priv
->clk_eee
)) {
4025 dev_dbg(&priv
->pdev
->dev
, "failed to get enet-eee clock\n");
4026 err
= PTR_ERR(priv
->clk_eee
);
4027 goto err_clk_disable
;
4030 /* If this is an internal GPHY, power it on now, before UniMAC is
4031 * brought out of reset as absolutely no UniMAC activity is allowed
4033 if (device_get_phy_mode(&pdev
->dev
) == PHY_INTERFACE_MODE_INTERNAL
)
4034 bcmgenet_power_up(priv
, GENET_POWER_PASSIVE
);
4036 if (pd
&& !IS_ERR_OR_NULL(pd
->mac_address
))
4037 ether_addr_copy(dev
->dev_addr
, pd
->mac_address
);
4039 if (!device_get_mac_address(&pdev
->dev
, dev
->dev_addr
, ETH_ALEN
))
4040 if (has_acpi_companion(&pdev
->dev
))
4041 bcmgenet_get_hw_addr(priv
, dev
->dev_addr
);
4043 if (!is_valid_ether_addr(dev
->dev_addr
)) {
4044 dev_warn(&pdev
->dev
, "using random Ethernet MAC\n");
4045 eth_hw_addr_random(dev
);
4050 err
= bcmgenet_mii_init(dev
);
4052 goto err_clk_disable
;
4054 /* setup number of real queues + 1 (GENET_V1 has 0 hardware queues
4055 * just the ring 16 descriptor based TX
4057 netif_set_real_num_tx_queues(priv
->dev
, priv
->hw_params
->tx_queues
+ 1);
4058 netif_set_real_num_rx_queues(priv
->dev
, priv
->hw_params
->rx_queues
+ 1);
4060 /* Set default coalescing parameters */
4061 for (i
= 0; i
< priv
->hw_params
->rx_queues
; i
++)
4062 priv
->rx_rings
[i
].rx_max_coalesced_frames
= 1;
4063 priv
->rx_rings
[DESC_INDEX
].rx_max_coalesced_frames
= 1;
4065 /* libphy will determine the link state */
4066 netif_carrier_off(dev
);
4068 /* Turn off the main clock, WOL clock is handled separately */
4069 clk_disable_unprepare(priv
->clk
);
4071 err
= register_netdev(dev
);
4073 bcmgenet_mii_exit(dev
);
4080 clk_disable_unprepare(priv
->clk
);
4086 static int bcmgenet_remove(struct platform_device
*pdev
)
4088 struct bcmgenet_priv
*priv
= dev_to_priv(&pdev
->dev
);
4090 dev_set_drvdata(&pdev
->dev
, NULL
);
4091 unregister_netdev(priv
->dev
);
4092 bcmgenet_mii_exit(priv
->dev
);
4093 free_netdev(priv
->dev
);
4098 static void bcmgenet_shutdown(struct platform_device
*pdev
)
4100 bcmgenet_remove(pdev
);
4103 #ifdef CONFIG_PM_SLEEP
4104 static int bcmgenet_resume_noirq(struct device
*d
)
4106 struct net_device
*dev
= dev_get_drvdata(d
);
4107 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
4111 if (!netif_running(dev
))
4114 /* Turn on the clock */
4115 ret
= clk_prepare_enable(priv
->clk
);
4119 if (device_may_wakeup(d
) && priv
->wolopts
) {
4120 /* Account for Wake-on-LAN events and clear those events
4121 * (Some devices need more time between enabling the clocks
4122 * and the interrupt register reflecting the wake event so
4123 * read the register twice)
4125 reg
= bcmgenet_intrl2_0_readl(priv
, INTRL2_CPU_STAT
);
4126 reg
= bcmgenet_intrl2_0_readl(priv
, INTRL2_CPU_STAT
);
4127 if (reg
& UMAC_IRQ_WAKE_EVENT
)
4128 pm_wakeup_event(&priv
->pdev
->dev
, 0);
4131 bcmgenet_intrl2_0_writel(priv
, UMAC_IRQ_WAKE_EVENT
, INTRL2_CPU_CLEAR
);
4136 static int bcmgenet_resume(struct device
*d
)
4138 struct net_device
*dev
= dev_get_drvdata(d
);
4139 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
4140 struct bcmgenet_rxnfc_rule
*rule
;
4141 unsigned long dma_ctrl
;
4145 if (!netif_running(dev
))
4148 /* From WOL-enabled suspend, switch to regular clock */
4149 if (device_may_wakeup(d
) && priv
->wolopts
)
4150 bcmgenet_power_up(priv
, GENET_POWER_WOL_MAGIC
);
4152 /* If this is an internal GPHY, power it back on now, before UniMAC is
4153 * brought out of reset as absolutely no UniMAC activity is allowed
4155 if (priv
->internal_phy
)
4156 bcmgenet_power_up(priv
, GENET_POWER_PASSIVE
);
4158 bcmgenet_umac_reset(priv
);
4162 phy_init_hw(dev
->phydev
);
4164 /* Speed settings must be restored */
4165 genphy_config_aneg(dev
->phydev
);
4166 bcmgenet_mii_config(priv
->dev
, false);
4168 /* Restore enabled features */
4169 bcmgenet_set_features(dev
, dev
->features
);
4171 bcmgenet_set_hw_addr(priv
, dev
->dev_addr
);
4173 /* Restore hardware filters */
4174 bcmgenet_hfb_clear(priv
);
4175 list_for_each_entry(rule
, &priv
->rxnfc_list
, list
)
4176 if (rule
->state
!= BCMGENET_RXNFC_STATE_UNUSED
)
4177 bcmgenet_hfb_create_rxnfc_filter(priv
, rule
);
4179 if (priv
->internal_phy
) {
4180 reg
= bcmgenet_ext_readl(priv
, EXT_EXT_PWR_MGMT
);
4181 reg
|= EXT_ENERGY_DET_MASK
;
4182 bcmgenet_ext_writel(priv
, reg
, EXT_EXT_PWR_MGMT
);
4185 /* Disable RX/TX DMA and flush TX queues */
4186 dma_ctrl
= bcmgenet_dma_disable(priv
);
4188 /* Reinitialize TDMA and RDMA and SW housekeeping */
4189 ret
= bcmgenet_init_dma(priv
);
4191 netdev_err(dev
, "failed to initialize DMA\n");
4192 goto out_clk_disable
;
4195 /* Always enable ring 16 - descriptor ring */
4196 bcmgenet_enable_dma(priv
, dma_ctrl
);
4198 if (!device_may_wakeup(d
))
4199 phy_resume(dev
->phydev
);
4201 if (priv
->eee
.eee_enabled
)
4202 bcmgenet_eee_enable_set(dev
, true);
4204 bcmgenet_netif_start(dev
);
4206 netif_device_attach(dev
);
4211 if (priv
->internal_phy
)
4212 bcmgenet_power_down(priv
, GENET_POWER_PASSIVE
);
4213 clk_disable_unprepare(priv
->clk
);
4217 static int bcmgenet_suspend(struct device
*d
)
4219 struct net_device
*dev
= dev_get_drvdata(d
);
4220 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
4222 if (!netif_running(dev
))
4225 netif_device_detach(dev
);
4227 bcmgenet_netif_stop(dev
);
4229 if (!device_may_wakeup(d
))
4230 phy_suspend(dev
->phydev
);
4232 /* Disable filtering */
4233 bcmgenet_hfb_reg_writel(priv
, 0, HFB_CTRL
);
4238 static int bcmgenet_suspend_noirq(struct device
*d
)
4240 struct net_device
*dev
= dev_get_drvdata(d
);
4241 struct bcmgenet_priv
*priv
= netdev_priv(dev
);
4244 if (!netif_running(dev
))
4247 /* Prepare the device for Wake-on-LAN and switch to the slow clock */
4248 if (device_may_wakeup(d
) && priv
->wolopts
)
4249 ret
= bcmgenet_power_down(priv
, GENET_POWER_WOL_MAGIC
);
4250 else if (priv
->internal_phy
)
4251 ret
= bcmgenet_power_down(priv
, GENET_POWER_PASSIVE
);
4253 /* Let the framework handle resumption and leave the clocks on */
4257 /* Turn off the clocks */
4258 clk_disable_unprepare(priv
->clk
);
4263 #define bcmgenet_suspend NULL
4264 #define bcmgenet_suspend_noirq NULL
4265 #define bcmgenet_resume NULL
4266 #define bcmgenet_resume_noirq NULL
4267 #endif /* CONFIG_PM_SLEEP */
4269 static const struct dev_pm_ops bcmgenet_pm_ops
= {
4270 .suspend
= bcmgenet_suspend
,
4271 .suspend_noirq
= bcmgenet_suspend_noirq
,
4272 .resume
= bcmgenet_resume
,
4273 .resume_noirq
= bcmgenet_resume_noirq
,
4276 static const struct acpi_device_id genet_acpi_match
[] = {
4277 { "BCM6E4E", (kernel_ulong_t
)&bcm2711_plat_data
},
4280 MODULE_DEVICE_TABLE(acpi
, genet_acpi_match
);
4282 static struct platform_driver bcmgenet_driver
= {
4283 .probe
= bcmgenet_probe
,
4284 .remove
= bcmgenet_remove
,
4285 .shutdown
= bcmgenet_shutdown
,
4288 .of_match_table
= bcmgenet_match
,
4289 .pm
= &bcmgenet_pm_ops
,
4290 .acpi_match_table
= genet_acpi_match
,
4293 module_platform_driver(bcmgenet_driver
);
4295 MODULE_AUTHOR("Broadcom Corporation");
4296 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
4297 MODULE_ALIAS("platform:bcmgenet");
4298 MODULE_LICENSE("GPL");