/* Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/inetdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>
#include "mvneta_bm.h"
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4
#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30
#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6
#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT 19
#define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2))
#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3
#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8
#define MVNETA_PORT_RX_RESET 0x1cc0
#define MVNETA_PORT_RX_DMA_RESET BIT(0)
#define MVNETA_PHY_ADDR 0x2000
#define MVNETA_PHY_ADDR_MASK 0x1f
#define MVNETA_MBUS_RETRY 0x2010
#define MVNETA_UNIT_INTR_CAUSE 0x2080
#define MVNETA_UNIT_CONTROL 0x20B0
#define MVNETA_PHY_POLLING_ENABLE BIT(1)
#define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
#define MVNETA_BASE_ADDR_ENABLE 0x2290
#define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
#define MVNETA_PORT_CONFIG 0x2400
#define MVNETA_UNI_PROMISC_MODE BIT(0)
#define MVNETA_DEF_RXQ(q) ((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM BIT(12)
#define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
                                          MVNETA_DEF_RXQ_ARP(q) | \
                                          MVNETA_DEF_RXQ_TCP(q) | \
                                          MVNETA_DEF_RXQ_UDP(q) | \
                                          MVNETA_DEF_RXQ_BPDU(q) | \
                                          MVNETA_TX_UNSET_ERR_SUM | \
                                          MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND 0x2404
#define MVNETA_MAC_ADDR_LOW 0x2414
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
#define MVNETA_TXQ_CMD 0x2448
#define MVNETA_TXQ_DISABLE_SHIFT 8
#define MVNETA_TXQ_ENABLE_MASK 0x000000ff
#define MVNETA_RX_DISCARD_FRAME_COUNT 0x2484
#define MVNETA_OVERRUN_FRAME_COUNT 0x2488
#define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4
#define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31)
#define MVNETA_ACC_MODE 0x2500
#define MVNETA_BM_ADDRESS 0x2504
#define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq)
#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8)
#define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
/* Exception Interrupt Port/Queue Cause register
 *
 * Their behavior depends on the mapping done using the PCPX2Q
 * registers. For a given CPU, if the bit associated with a queue is
 * not set, then a read of the register from this CPU will always
 * return 0 and a write won't do anything.
 */

#define MVNETA_INTR_NEW_CAUSE 0x25a0
#define MVNETA_INTR_NEW_MASK 0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29 = OLD_REG_SUM, see old reg ?
 * bit  30 = TX_ERR_SUM, one bit for 4 ports
 * bit  31 = MISC_SUM,   one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
#define MVNETA_MISCINTR_INTR_MASK BIT(31)
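/* Example (illustrative, not from the original source): with two RX
 * queues enabled, MVNETA_RX_INTR_MASK(2) expands to
 * ((1 << 2) - 1) << 8 == 0x300, i.e. only the RXQ OCCUP bits of
 * queues 0 and 1.
 */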
#define MVNETA_INTR_OLD_CAUSE 0x25a8
#define MVNETA_INTR_OLD_MASK 0x25ac
/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE 0x25b0
#define MVNETA_INTR_MISC_MASK 0x25b4

#define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
#define MVNETA_CAUSE_LINK_CHANGE BIT(1)
#define MVNETA_CAUSE_PTP BIT(4)

#define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
#define MVNETA_CAUSE_RX_OVERRUN BIT(8)
#define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
#define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
#define MVNETA_CAUSE_TX_UNDERUN BIT(11)
#define MVNETA_CAUSE_PRBS_ERR BIT(12)
#define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
#define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)

#define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
#define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
#define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))

#define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
#define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
#define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))

#define MVNETA_INTR_ENABLE 0x25b8
#define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
#define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff

#define MVNETA_RXQ_CMD 0x2680
#define MVNETA_RXQ_DISABLE_SHIFT 8
#define MVNETA_RXQ_ENABLE_MASK 0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0 0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_1000BASE_X BIT(1)
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
#define MVNETA_GMAC_LINK_UP BIT(0)
#define MVNETA_GMAC_SPEED_1000 BIT(1)
#define MVNETA_GMAC_SPEED_100 BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
#define MVNETA_GMAC_AN_COMPLETE BIT(11)
#define MVNETA_GMAC_SYNC_OK BIT(14)
#define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2)
#define MVNETA_GMAC_AN_BYPASS_ENABLE BIT(3)
#define MVNETA_GMAC_INBAND_RESTART_AN BIT(4)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_CONFIG_FLOW_CTRL BIT(8)
#define MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL BIT(9)
#define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3000
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
#define MVNETA_DA_FILT_OTH_MCAST 0x3500
#define MVNETA_DA_FILT_UCAST_BASE 0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
#define MVNETA_PORT_TX_RESET 0x3cf0
#define MVNETA_PORT_TX_DMA_RESET BIT(0)
#define MVNETA_TX_MTU 0x3e0c
#define MVNETA_TX_TOKEN_SIZE 0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff

#define MVNETA_LPI_CTRL_0 0x2cc0
#define MVNETA_LPI_CTRL_1 0x2cc4
#define MVNETA_LPI_REQUEST_ENABLE BIT(0)
#define MVNETA_LPI_CTRL_2 0x2cc8
#define MVNETA_LPI_STATUS 0x2ccc

#define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index) \
        (((index) < (q)->last_desc) ? ((index) + 1) : 0)
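/* Example (illustrative, not from the original source): the macro is a
 * division-free modulo increment over last_desc + 1 ring entries, i.e.
 *
 *	next = (index + 1) % ((q)->last_desc + 1);
 *
 * so for a 128-entry ring (last_desc == 127), index 126 advances to
 * 127 and index 127 wraps back to 0.
 */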
/* Various constants */

#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver), or is filled automatically by zeroes on
 * the RX side. Those two bytes being at the front of the Ethernet
 * header, they allow the IP header to be aligned on a 4 byte boundary
 * automatically: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE 2
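/* Example (illustrative, not from the original source): with the
 * 2 byte Marvell header preceding the 14 byte Ethernet header, the IP
 * header starts at offset 2 + 14 = 16 bytes, which is already 4 byte
 * aligned.
 */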
#define MVNETA_VLAN_TAG_LEN 4

#define MVNETA_TX_CSUM_DEF_SIZE 1600
#define MVNETA_TX_CSUM_MAX_SIZE 9800
#define MVNETA_ACC_MODE_EXT1 1
#define MVNETA_ACC_MODE_EXT2 2

#define MVNETA_MAX_DECODE_WIN 6

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000

#define MVNETA_TX_MTU_MAX 0x3ffff

/* The RSS lookup table actually has 256 entries but we do not use
 * them yet.
 */
#define MVNETA_RSS_LU_TABLE_SIZE 1
/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532

/* Max number of allowed TCP segments for software TSO */
#define MVNETA_MAX_TSO_SEGS 100

#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32
/* Number of bytes to be taken into account by HW when putting incoming data
 * to the buffers. It is needed in case NET_SKB_PAD exceeds the maximum packet
 * offset supported in the MVNETA_RXQ_CONFIG_REG(q) registers.
 */
#define MVNETA_RX_PKT_OFFSET_CORRECTION 64
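/* Note (illustrative, not from the original source): the packet offset
 * field programmed by mvneta_rxq_offset_set() is expressed in units of
 * 8 bytes, so a large NET_SKB_PAD cannot be encoded there; instead the
 * correction is added directly to the buffer DMA address at refill
 * time (see mvneta_rx_refill()).
 */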
#define MVNETA_RX_PKT_SIZE(mtu) \
        ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
              ETH_HLEN + ETH_FCS_LEN, \
              cache_line_size())

#define IS_TSO_HEADER(txq, addr) \
        ((addr >= txq->tso_hdrs_phys) && \
         (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
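/* Note (illustrative, not from the original source): TSO headers live
 * in one contiguous DMA area of txq->size * TSO_HEADER_SIZE bytes, so
 * this simple range check is enough to tell them apart from regular
 * skb mappings, which must be dma_unmap_single()'d on completion.
 */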
#define MVNETA_RX_GET_BM_POOL_ID(rxd) \
        (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT)

enum {
        ETHTOOL_STAT_EEE_WAKEUP,
};

struct mvneta_statistic {
        unsigned short offset;
        unsigned short type;
        const char name[ETH_GSTRING_LEN];
};

#define T_REG_32 32
#define T_REG_64 64
#define T_SW     1
static const struct mvneta_statistic mvneta_statistics[] = {
        { 0x3000, T_REG_64, "good_octets_received", },
        { 0x3010, T_REG_32, "good_frames_received", },
        { 0x3008, T_REG_32, "bad_octets_received", },
        { 0x3014, T_REG_32, "bad_frames_received", },
        { 0x3018, T_REG_32, "broadcast_frames_received", },
        { 0x301c, T_REG_32, "multicast_frames_received", },
        { 0x3050, T_REG_32, "unrec_mac_control_received", },
        { 0x3058, T_REG_32, "good_fc_received", },
        { 0x305c, T_REG_32, "bad_fc_received", },
        { 0x3060, T_REG_32, "undersize_received", },
        { 0x3064, T_REG_32, "fragments_received", },
        { 0x3068, T_REG_32, "oversize_received", },
        { 0x306c, T_REG_32, "jabber_received", },
        { 0x3070, T_REG_32, "mac_receive_error", },
        { 0x3074, T_REG_32, "bad_crc_event", },
        { 0x3078, T_REG_32, "collision", },
        { 0x307c, T_REG_32, "late_collision", },
        { 0x2484, T_REG_32, "rx_discard", },
        { 0x2488, T_REG_32, "rx_overrun", },
        { 0x3020, T_REG_32, "frames_64_octets", },
        { 0x3024, T_REG_32, "frames_65_to_127_octets", },
        { 0x3028, T_REG_32, "frames_128_to_255_octets", },
        { 0x302c, T_REG_32, "frames_256_to_511_octets", },
        { 0x3030, T_REG_32, "frames_512_to_1023_octets", },
        { 0x3034, T_REG_32, "frames_1024_to_max_octets", },
        { 0x3038, T_REG_64, "good_octets_sent", },
        { 0x3040, T_REG_32, "good_frames_sent", },
        { 0x3044, T_REG_32, "excessive_collision", },
        { 0x3048, T_REG_32, "multicast_frames_sent", },
        { 0x304c, T_REG_32, "broadcast_frames_sent", },
        { 0x3054, T_REG_32, "fc_sent", },
        { 0x300c, T_REG_32, "internal_mac_transmit_err", },
        { ETHTOOL_STAT_EEE_WAKEUP, T_SW, "eee_wakeup_errors", },
};
struct mvneta_pcpu_stats {
        struct u64_stats_sync syncp;
        u64 rx_packets;
        u64 rx_bytes;
        u64 tx_packets;
        u64 tx_bytes;
};

struct mvneta_pcpu_port {
        /* Pointer to the shared port */
        struct mvneta_port *pp;

        /* Pointer to the CPU-local NAPI struct */
        struct napi_struct napi;

        /* Cause of the previous interrupt */
        u32 cause_rx_tx;
};
struct mvneta_port {
        u8 id;
        struct mvneta_pcpu_port __percpu *ports;
        struct mvneta_pcpu_stats __percpu *stats;

        int pkt_size;
        unsigned int frag_size;
        void __iomem *base;
        struct mvneta_rx_queue *rxqs;
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
        struct hlist_node node_online;
        struct hlist_node node_dead;
        int rxq_def;
        /* Protect the access to the percpu interrupt registers,
         * ensuring that the configuration remains coherent.
         */
        spinlock_t lock;
        bool is_stopped;

        u32 cause_rx_tx;
        struct napi_struct napi;

        /* Core clock */
        struct clk *clk;
        /* AXI clock */
        struct clk *clk_bus;
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;

        phy_interface_t phy_interface;
        struct device_node *dn;
        unsigned int tx_csum_limit;
        struct phylink *phylink;

        struct mvneta_bm *bm_priv;
        struct mvneta_bm_pool *pool_long;
        struct mvneta_bm_pool *pool_short;
        int bm_win_id;

        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];

        u32 indir[MVNETA_RSS_LU_TABLE_SIZE];

        /* Flags for special SoC configurations */
        bool neta_armada3700;
        u16 rx_offset_correction;
        const struct mbus_dram_target_info *dram_target_info;
};
/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design.
 */

#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
#define MVNETA_TX_L3_IP6 BIT(17)
#define MVNETA_TXD_IP_CSUM BIT(18)
#define MVNETA_TXD_Z_PAD BIT(19)
#define MVNETA_TXD_L_DESC BIT(20)
#define MVNETA_TXD_F_DESC BIT(21)
#define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD  | \
                             MVNETA_TXD_L_DESC | \
                             MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)

#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_BM_POOL_SHIFT 13
#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14))
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
#define MVNETA_RXD_ERR_LEN BIT(18)
#define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4 BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
#if defined(__LITTLE_ENDIAN)
struct mvneta_tx_desc {
        u32 command;       /* Options used by HW for packet transmitting */
        u16 reserved1;     /* csum_l4 (for future use) */
        u16 data_size;     /* Data size of transmitted packet in bytes */
        u32 buf_phys_addr; /* Physical addr of transmitted buffer */
        u32 reserved2;     /* hw_cmd - (for future use, PMT) */
        u32 reserved3[4];  /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u32 status;        /* Info about received packet */
        u16 reserved1;     /* pnc_info - (for future use, PnC) */
        u16 data_size;     /* Size of received packet in bytes */

        u32 buf_phys_addr; /* Physical address of the buffer */
        u32 reserved2;     /* pnc_flow_id (for future use, PnC) */

        u32 buf_cookie;    /* cookie for access to RX buffer in rx path */
        u16 reserved3;     /* prefetch_cmd, for future use */
        u16 reserved4;     /* csum_l4 - (for future use, PnC) */

        u32 reserved5;     /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;     /* hw_cmd (for future use, PnC and HWF) */
};
#else
struct mvneta_tx_desc {
        u16 data_size;     /* Data size of transmitted packet in bytes */
        u16 reserved1;     /* csum_l4 (for future use) */
        u32 command;       /* Options used by HW for packet transmitting */
        u32 reserved2;     /* hw_cmd - (for future use, PMT) */
        u32 buf_phys_addr; /* Physical addr of transmitted buffer */
        u32 reserved3[4];  /* Reserved - (for future use) */
};

struct mvneta_rx_desc {
        u16 data_size;     /* Size of received packet in bytes */
        u16 reserved1;     /* pnc_info - (for future use, PnC) */
        u32 status;        /* Info about received packet */

        u32 reserved2;     /* pnc_flow_id (for future use, PnC) */
        u32 buf_phys_addr; /* Physical address of the buffer */

        u16 reserved4;     /* csum_l4 - (for future use, PnC) */
        u16 reserved3;     /* prefetch_cmd, for future use */
        u32 buf_cookie;    /* cookie for access to RX buffer in rx path */

        u32 reserved5;     /* pnc_extra PnC (for future use, PnC) */
        u32 reserved6;     /* hw_cmd (for future use, PnC and HWF) */
};
#endif
struct mvneta_tx_queue {
        /* Number of this TX queue, in the range 0-7 */
        u8 id;

        /* Number of TX DMA descriptors in the descriptor ring */
        int size;

        /* Number of currently used TX DMA descriptors in the
         * descriptor ring
         */
        int count;
        int pending;
        int tx_stop_threshold;
        int tx_wake_threshold;

        /* Array of transmitted skb */
        struct sk_buff **tx_skb;

        /* Index of last TX DMA descriptor that was inserted */
        int txq_put_index;

        /* Index of the TX DMA descriptor to be cleaned up */
        int txq_get_index;

        u32 done_pkts_coal;

        /* Virtual address of the TX DMA descriptors array */
        struct mvneta_tx_desc *descs;

        /* DMA address of the TX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last TX DMA descriptor */
        int last_desc;

        /* Index of the next TX DMA descriptor to process */
        int next_desc_to_proc;

        /* DMA buffers for TSO headers */
        char *tso_hdrs;

        /* DMA address of TSO headers */
        dma_addr_t tso_hdrs_phys;

        /* Affinity mask for CPUs */
        cpumask_t affinity_mask;
};
struct mvneta_rx_queue {
        /* rx queue number, in the range 0-7 */
        u8 id;

        /* num of rx descriptors in the rx descriptor ring */
        int size;

        /* counter of times when mvneta_refill() failed */
        int missed;

        u32 pkts_coal;
        u32 time_coal;

        /* Virtual address of the RX buffer */
        void **buf_virt_addr;

        /* Virtual address of the RX DMA descriptors array */
        struct mvneta_rx_desc *descs;

        /* DMA address of the RX DMA descriptors array */
        dma_addr_t descs_phys;

        /* Index of the last RX DMA descriptor */
        int last_desc;

        /* Index of the next RX DMA descriptor to process */
        int next_desc_to_proc;
};
static enum cpuhp_state online_hpstate;

/* The hardware supports eight (8) rx queues and eight (8) tx queues;
 * all of them are allocated.
 */
static int rxq_number = 8;
static int txq_number = 8;

static int rxq_def;
static int rx_copybreak __read_mostly = 256;

/* HW BM needs each port to be identified by a unique ID */
static int global_port_id;

#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
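/* Note (illustrative, not from the original source): frames of
 * rx_copybreak (256) bytes or fewer are copied into a freshly
 * allocated skb so the DMA buffer can be reused without unmapping;
 * larger frames go through build_skb() and trigger a buffer refill.
 */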
/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
        writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
        return readl(pp->base + offset);
}
/* Increment txq get counter */
static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
{
        txq->txq_get_index++;
        if (txq->txq_get_index == txq->size)
                txq->txq_get_index = 0;
}

/* Increment txq put counter */
static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
{
        txq->txq_put_index++;
        if (txq->txq_put_index == txq->size)
                txq->txq_put_index = 0;
}
/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
        int i;
        u32 dummy;

        /* Perform dummy reads from MIB counters */
        for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
                dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
        dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
        dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
static void
mvneta_get_stats64(struct net_device *dev,
                   struct rtnl_link_stats64 *stats)
{
        struct mvneta_port *pp = netdev_priv(dev);
        unsigned int start;
        int cpu;

        for_each_possible_cpu(cpu) {
                struct mvneta_pcpu_stats *cpu_stats;
                u64 rx_packets;
                u64 rx_bytes;
                u64 tx_packets;
                u64 tx_bytes;

                cpu_stats = per_cpu_ptr(pp->stats, cpu);
                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        rx_packets = cpu_stats->rx_packets;
                        rx_bytes   = cpu_stats->rx_bytes;
                        tx_packets = cpu_stats->tx_packets;
                        tx_bytes   = cpu_stats->tx_bytes;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                stats->rx_packets += rx_packets;
                stats->rx_bytes   += rx_bytes;
                stats->tx_packets += tx_packets;
                stats->tx_bytes   += tx_bytes;
        }

        stats->rx_errors  = dev->stats.rx_errors;
        stats->rx_dropped = dev->stats.rx_dropped;

        stats->tx_dropped = dev->stats.tx_dropped;
}
/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error.
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
        return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
                MVNETA_RXD_FIRST_LAST_DESC;
}
/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
                                          struct mvneta_rx_queue *rxq,
                                          int ndescs)
{
        /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
         * be added at once.
         */
        while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                            (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
                             MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
                ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
        }

        mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
                    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
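/* Example (illustrative, not from the original source): a request for
 * 300 descriptors results in two register writes, 255 followed by 45,
 * because the ADD_NON_OCCUPIED field is limited to 8 bits.
 */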
/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
        return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}
/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
                                       struct mvneta_rx_queue *rxq,
                                       int rx_done, int rx_filled)
{
        u32 val;

        if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
                val = rx_done |
                      (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
                return;
        }

        /* Only 255 descriptors can be added at once */
        while ((rx_done > 0) || (rx_filled > 0)) {
                if (rx_done <= 0xff) {
                        val = rx_done;
                        rx_done = 0;
                } else {
                        val = 0xff;
                        rx_done -= 0xff;
                }
                if (rx_filled <= 0xff) {
                        val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled = 0;
                } else {
                        val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
                        rx_filled -= 0xff;
                }
                mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
        }
}
/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
        int rx_desc = rxq->next_desc_to_proc;

        rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
        prefetch(rxq->descs + rxq->next_desc_to_proc);
        return rxq->descs + rx_desc;
}
/* Change maximum receive size of the port. */
static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
        val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
                MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}
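/* Example (illustrative, not from the original source): the GMAC field
 * appears to be programmed in units of two bytes, hence the division
 * by 2; for max_rx_size == 1518 the value written is
 * (1518 - 2) / 2 = 758.
 */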
/* Set rx queue offset */
static void mvneta_rxq_offset_set(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq,
                                  int offset)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;

        val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int pend_desc)
{
        u32 val;

        pend_desc += txq->pending;

        /* Only 255 Tx descriptors can be added at once */
        do {
                val = min(pend_desc, 255);
                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
                pend_desc -= val;
        } while (pend_desc > 0);
        txq->pending = 0;
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
        int tx_desc = txq->next_desc_to_proc;

        txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
        return txq->descs + tx_desc;
}
/* Release the last allocated TX descriptor. Useful to handle DMA
 * mapping failures in the TX path.
 */
static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
{
        if (txq->next_desc_to_proc == 0)
                txq->next_desc_to_proc = txq->last_desc - 1;
        else
                txq->next_desc_to_proc--;
}
/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq,
                                    int buf_size)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

        val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
        val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
/* Disable buffer management (BM) */
static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
                                  struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Enable buffer management (BM) */
static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
                                 struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val |= MVNETA_RXQ_HW_BUF_ALLOC;
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Notify HW about port's assignment of pool for bigger packets */
static void mvneta_rxq_long_pool_set(struct mvneta_port *pp,
                                     struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK;
        val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}

/* Notify HW about port's assignment of pool for smaller packets */
static void mvneta_rxq_short_pool_set(struct mvneta_port *pp,
                                      struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK;
        val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT);

        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Set port's receive buffer size for assigned BM pool */
static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp,
                                              int buf_size,
                                              u8 pool_id)
{
        u32 val;

        if (!IS_ALIGNED(buf_size, 8)) {
                dev_warn(pp->dev->dev.parent,
                         "illegal buf_size value %d, round to %d\n",
                         buf_size, ALIGN(buf_size, 8));
                buf_size = ALIGN(buf_size, 8);
        }

        val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id));
        val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK;
        mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val);
}
/* Configure MBUS window in order to enable access to BM internal SRAM */
static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize,
                                  u8 target, u8 attr)
{
        u32 win_enable, win_protect;
        int i;

        win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE);

        if (pp->bm_win_id < 0) {
                /* Find first not occupied window */
                for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) {
                        if (win_enable & (1 << i)) {
                                pp->bm_win_id = i;
                                break;
                        }
                }
                if (i == MVNETA_MAX_DECODE_WIN)
                        return -ENOMEM;
        } else {
                i = pp->bm_win_id;
        }

        mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
        mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

        if (i < 4)
                mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);

        mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) |
                    (attr << 8) | target);

        mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000);

        win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE);
        win_protect |= 3 << (2 * i);
        mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);

        win_enable &= ~(1 << i);
        mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);

        return 0;
}
static int mvneta_bm_port_mbus_init(struct mvneta_port *pp)
{
        u32 wsize;
        u8 target, attr;
        int err;

        /* Get BM window information */
        err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize,
                                         &target, &attr);
        if (err < 0)
                return err;

        pp->bm_win_id = -1;

        /* Open NETA -> BM window */
        err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize,
                                     target, attr);
        if (err < 0) {
                netdev_info(pp->dev, "fail to configure mbus window to BM\n");
                return err;
        }
        return 0;
}
/* Assign and initialize pools for port. In case of failure the
 * buffer manager will remain disabled for the current port.
 */
static int mvneta_bm_port_init(struct platform_device *pdev,
                               struct mvneta_port *pp)
{
        struct device_node *dn = pdev->dev.of_node;
        u32 long_pool_id, short_pool_id;

        if (!pp->neta_armada3700) {
                int ret;

                ret = mvneta_bm_port_mbus_init(pp);
                if (ret)
                        return ret;
        }

        if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) {
                netdev_info(pp->dev, "missing long pool id\n");
                return -EINVAL;
        }

        /* Create port's long pool depending on mtu */
        pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
                                           MVNETA_BM_LONG, pp->id,
                                           MVNETA_RX_PKT_SIZE(pp->dev->mtu));
        if (!pp->pool_long) {
                netdev_info(pp->dev, "fail to obtain long pool for port\n");
                return -ENOMEM;
        }

        pp->pool_long->port_map |= 1 << pp->id;

        mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size,
                                   pp->pool_long->id);

        /* If short pool id is not defined, assume using single pool */
        if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id))
                short_pool_id = long_pool_id;

        /* Create port's short pool */
        pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id,
                                            MVNETA_BM_SHORT, pp->id,
                                            MVNETA_BM_SHORT_PKT_SIZE);
        if (!pp->pool_short) {
                netdev_info(pp->dev, "fail to obtain short pool for port\n");
                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
                return -ENOMEM;
        }

        if (short_pool_id != long_pool_id) {
                pp->pool_short->port_map |= 1 << pp->id;
                mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size,
                                           pp->pool_short->id);
        }

        return 0;
}
/* Update settings of a pool for bigger packets */
static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
{
        struct mvneta_bm_pool *bm_pool = pp->pool_long;
        struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
        int num;

        /* Release all buffers from long pool */
        mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
        if (hwbm_pool->buf_num) {
                WARN(1, "cannot free all buffers in pool %d\n",
                     bm_pool->id);
                goto bm_mtu_err;
        }

        bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
        bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
        hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                        SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));

        /* Fill entire long pool */
        num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
        if (num != hwbm_pool->size) {
                WARN(1, "pool %d: %d of %d allocated\n",
                     bm_pool->id, num, hwbm_pool->size);
                goto bm_mtu_err;
        }

        mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);

        return;

bm_mtu_err:
        mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
        mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);

        pp->bm_priv = NULL;
        mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1);
        netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n");
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
        int queue;
        u32 q_map;

        /* Enable all initialized TXs. */
        q_map = 0;
        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];

                if (txq->descs)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

        q_map = 0;
        /* Enable all initialized RXQs. */
        for (queue = 0; queue < rxq_number; queue++) {
                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];

                if (rxq->descs)
                        q_map |= (1 << queue);
        }
        mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}
/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
        u32 val;
        int count;

        /* Stop Rx port activity. Check port Rx activity. */
        val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

        /* Issue stop command for active channels only */
        if (val != 0)
                mvreg_write(pp, MVNETA_RXQ_CMD,
                            val << MVNETA_RXQ_DISABLE_SHIFT);

        /* Wait for all Rx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_RXQ_CMD);
        } while (val & MVNETA_RXQ_ENABLE_MASK);

        /* Stop Tx port activity. Check port Tx activity. Issue stop
         * command for active channels only.
         */
        val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

        if (val != 0)
                mvreg_write(pp, MVNETA_TXQ_CMD,
                            (val << MVNETA_TXQ_DISABLE_SHIFT));

        /* Wait for all Tx activity to terminate. */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
                        netdev_warn(pp->dev,
                                    "TIMEOUT for TX stopped status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                /* Check TX Command reg that all Txqs are stopped */
                val = mvreg_read(pp, MVNETA_TXQ_CMD);

        } while (val & MVNETA_TXQ_ENABLE_MASK);

        /* Double check to verify that TX FIFO is empty */
        count = 0;
        do {
                if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
                        netdev_warn(pp->dev,
                                    "TX FIFO empty timeout status=0x%08x\n",
                                    val);
                        break;
                }
                mdelay(1);

                val = mvreg_read(pp, MVNETA_PORT_STATUS);
        } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
                 (val & MVNETA_TX_IN_PRGRS));

        udelay(200);
}
/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val |= MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
        u32 val;

        /* Reset the Enable bit in the Serial Control Register */
        val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
        val &= ~MVNETA_GMAC0_PORT_ENABLE;
        mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

        udelay(200);
}
/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                val = 0;
        } else {
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
        int offset;
        u32 val;

        if (queue == -1) {
                memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
                val = 0;
        } else {
                memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
                val = 0x1 | (queue << 1);
                val |= (val << 24) | (val << 16) | (val << 8);
        }

        for (offset = 0; offset <= 0xfc; offset += 4)
                mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}
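/* Example (illustrative, not from the original source): for queue == 1
 * each table byte becomes 0x1 | (1 << 1) = 0x03, replicated into all
 * four byte lanes as 0x03030303, i.e. "accept and steer to RX queue 1"
 * for four consecutive filter entries per 32-bit write.
 */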
static void mvneta_percpu_unmask_interrupt(void *arg)
{
        struct mvneta_port *pp = arg;

        /* All the queues are unmasked, but actually only the ones
         * mapped to this CPU will be unmasked.
         */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                    MVNETA_RX_INTR_MASK_ALL |
                    MVNETA_TX_INTR_MASK_ALL |
                    MVNETA_MISCINTR_INTR_MASK);
}

static void mvneta_percpu_mask_interrupt(void *arg)
{
        struct mvneta_port *pp = arg;

        /* All the queues are masked, but actually only the ones
         * mapped to this CPU will be masked.
         */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

static void mvneta_percpu_clear_intr_cause(void *arg)
{
        struct mvneta_port *pp = arg;

        /* All the queues are cleared, but actually only the ones
         * mapped to this CPU will be cleared.
         */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
}
/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
        int cpu;
        int queue;
        u32 val;
        int max_cpu = num_present_cpus();

        /* Clear all Cause registers */
        on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

        /* Mask all interrupts */
        on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
        mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

        /* Enable MBUS Retry bit16 */
        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

        /* Set CPU queue access map. CPUs are assigned to the RX and
         * TX queues modulo their number. If there is only one TX
         * queue then it is assigned to the CPU associated to the
         * default RX queue.
         */
        for_each_present_cpu(cpu) {
                int rxq_map = 0, txq_map = 0;
                int rxq, txq;

                if (!pp->neta_armada3700) {
                        for (rxq = 0; rxq < rxq_number; rxq++)
                                if ((rxq % max_cpu) == cpu)
                                        rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

                        for (txq = 0; txq < txq_number; txq++)
                                if ((txq % max_cpu) == cpu)
                                        txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);

                        /* With only one TX queue we configure a special case
                         * which will allow to get all the irq on a single
                         * CPU.
                         */
                        if (txq_number == 1)
                                txq_map = (cpu == pp->rxq_def) ?
                                        MVNETA_CPU_TXQ_ACCESS(1) : 0;

                } else {
                        txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
                        rxq_map = MVNETA_CPU_RXQ_ACCESS_ALL_MASK;
                }

                mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
        }

        /* Reset RX and TX DMAs */
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
        mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

        /* Disable Legacy WRR, Disable EJP, Release from reset */
        mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
        for (queue = 0; queue < txq_number; queue++) {
                mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
                mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
        }

        mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
        mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

        /* Set Port Acceleration Mode */
        if (pp->bm_priv)
                /* HW buffer management + legacy parser */
                val = MVNETA_ACC_MODE_EXT2;
        else
                /* SW buffer management + legacy parser */
                val = MVNETA_ACC_MODE_EXT1;
        mvreg_write(pp, MVNETA_ACC_MODE, val);

        if (pp->bm_priv)
                mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr);

        /* Update val of portCfg register accordingly with all RxQueue types */
        val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);

        val = 0;
        mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
        mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

        /* Build PORT_SDMA_CONFIG_REG */
        val = 0;

        /* Default burst size */
        val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
        val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

#if defined(__BIG_ENDIAN)
        val |= MVNETA_DESC_SWAP;
#endif

        /* Assign port SDMA configuration */
        mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

        /* Disable PHY polling in hardware, since we're using the
         * kernel phylib to do this.
         */
        val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
        val &= ~MVNETA_PHY_POLLING_ENABLE;
        mvreg_write(pp, MVNETA_UNIT_CONTROL, val);

        mvneta_set_ucast_table(pp, -1);
        mvneta_set_special_mcast_table(pp, -1);
        mvneta_set_other_mcast_table(pp, -1);

        /* Set port interrupt enable register - default enable all */
        mvreg_write(pp, MVNETA_INTR_ENABLE,
                    (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
                     | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));

        mvneta_mib_counters_clear(pp);
}
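/* Example (illustrative, not from the original source): with
 * max_cpu == 2 and eight queues, the modulo assignment above gives
 * CPU 0 the even-numbered RX/TX queues and CPU 1 the odd-numbered
 * ones.
 */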
/* Set max sizes for tx queues */
static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
{
        u32 val, size, mtu;
        int queue;

        mtu = max_tx_size * 8;
        if (mtu > MVNETA_TX_MTU_MAX)
                mtu = MVNETA_TX_MTU_MAX;

        /* Set MTU */
        val = mvreg_read(pp, MVNETA_TX_MTU);
        val &= ~MVNETA_TX_MTU_MAX;
        val |= mtu;
        mvreg_write(pp, MVNETA_TX_MTU, val);

        /* TX token size and all TXQs token size must be larger than MTU */
        val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);

        size = val & MVNETA_TX_TOKEN_SIZE_MAX;
        if (size < mtu) {
                size = mtu;
                val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
                val |= size;
                mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
        }
        for (queue = 0; queue < txq_number; queue++) {
                val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));

                size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
                if (size < mtu) {
                        size = mtu;
                        val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
                        val |= size;
                        mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
                }
        }
}
/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
                                  int queue)
{
        unsigned int unicast_reg;
        unsigned int tbl_offset;
        unsigned int reg_offset;

        /* Locate the Unicast table entry */
        last_nibble = (0xf & last_nibble);

        /* offset from unicast tbl base */
        tbl_offset = (last_nibble / 4) * 4;

        /* offset within the above reg */
        reg_offset = last_nibble % 4;

        unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

        if (queue == -1) {
                /* Clear accepts frame bit at specified unicast DA tbl entry */
                unicast_reg &= ~(0xff << (8 * reg_offset));
        } else {
                unicast_reg &= ~(0xff << (8 * reg_offset));
                unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
        }

        mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
                                int queue)
{
        unsigned int mac_h;
        unsigned int mac_l;

        if (queue != -1) {
                mac_l = (addr[4] << 8) | (addr[5]);
                mac_h = (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | (addr[3] << 0);

                mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
                mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
        }

        /* Accept frames of this address */
        mvneta_set_ucast_addr(pp, addr[5], queue);
}
/* Set the number of packets that will be received before an RX interrupt
 * is generated by HW.
 */
static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
                    value | MVNETA_RXQ_NON_OCCUPIED(0));
        rxq->pkts_coal = value;
}
/* Set the time delay in usec before an RX interrupt is generated by
 * HW.
 */
static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
                                    struct mvneta_rx_queue *rxq, u32 value)
{
        u32 val;
        unsigned long clk_rate;

        clk_rate = clk_get_rate(pp->clk);
        val = (clk_rate / 1000000) * value;

        mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
        rxq->time_coal = value;
}
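/* Example (illustrative, not from the original source): the register
 * counts core clock cycles, so with a 250 MHz clock a 100 usec delay
 * is programmed as (250000000 / 1000000) * 100 = 25000 cycles.
 */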
/* Set threshold for TX_DONE pkts coalescing */
static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
                                         struct mvneta_tx_queue *txq, u32 value)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));

        val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
        val |= MVNETA_TXQ_SENT_THRESH_MASK(value);

        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);

        txq->done_pkts_coal = value;
}
/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
                                u32 phys_addr, void *virt_addr,
                                struct mvneta_rx_queue *rxq)
{
        int i;

        rx_desc->buf_phys_addr = phys_addr;
        i = rx_desc - rxq->descs;
        rxq->buf_virt_addr[i] = virt_addr;
}
/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq,
                                     int sent_desc)
{
        u32 val;

        /* Only 255 TX descriptors can be updated at once */
        while (sent_desc > 0xff) {
                val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
                mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
                sent_desc = sent_desc - 0xff;
        }

        val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
        mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}
/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
                                        struct mvneta_tx_queue *txq)
{
        u32 val;
        int sent_desc;

        val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
        sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
                MVNETA_TXQ_SENT_DESC_SHIFT;

        return sent_desc;
}
/* Get number of sent descriptors and decrement counter.
 * The number of sent descriptors is returned.
 */
static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
                                     struct mvneta_tx_queue *txq)
{
        int sent_desc;

        /* Get number of sent descriptors */
        sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);

        /* Decrement sent descriptors counter */
        if (sent_desc)
                mvneta_txq_sent_desc_dec(pp, txq, sent_desc);

        return sent_desc;
}
/* Set TXQ descriptors fields relevant for CSUM calculation */
static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
                                int ip_hdr_len, int l4_proto)
{
        u32 command;

        /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
         * G_L4_chk, L4_type; required only for checksum
         * calculation.
         */
        command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;

        if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;

        if (l4_proto == IPPROTO_TCP)
                command |= MVNETA_TX_L4_CSUM_FULL;
        else if (l4_proto == IPPROTO_UDP)
                command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
        else
                command |= MVNETA_TX_L4_CSUM_NOT;

        return command;
}
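/* Example (illustrative, not from the original source): for a TCP/IPv4
 * frame with a 14 byte Ethernet header and a 20 byte IP header
 * (ihl == 5, as passed by mvneta_skb_tx_csum()), the command word is
 * (14 << MVNETA_TX_L3_OFF_SHIFT) | (5 << MVNETA_TX_IP_HLEN_SHIFT) |
 * MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */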
/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc)
{
        u32 status = rx_desc->status;

        if (!mvneta_rxq_desc_is_first_last(status)) {
                netdev_err(pp->dev,
                           "bad rx status %08x (buffer oversize), size=%d\n",
                           status, rx_desc->data_size);
                return;
        }

        switch (status & MVNETA_RXD_ERR_CODE_MASK) {
        case MVNETA_RXD_ERR_CRC:
                netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_OVERRUN:
                netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_LEN:
                netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        case MVNETA_RXD_ERR_RESOURCE:
                netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
                           status, rx_desc->data_size);
                break;
        }
}
/* Handle RX checksum offload based on the descriptor's status */
static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
                           struct sk_buff *skb)
{
        if ((status & MVNETA_RXD_L3_IP4) &&
            (status & MVNETA_RXD_L4_CSUM_OK)) {
                skb->csum = 0;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
                return;
        }

        skb->ip_summed = CHECKSUM_NONE;
}
/* Return tx queue pointer (find last set bit) according to <cause> returned
 * from the tx_done reg. <cause> must not be null. The return value is always
 * a valid queue, matching the first one found set in <cause>.
 */
static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
                                                     u32 cause)
{
        int queue = fls(cause) - 1;

        return &pp->txqs[queue];
}
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
                                 struct mvneta_tx_queue *txq, int num,
                                 struct netdev_queue *nq)
{
        unsigned int bytes_compl = 0, pkts_compl = 0;
        int i;

        for (i = 0; i < num; i++) {
                struct mvneta_tx_desc *tx_desc = txq->descs +
                        txq->txq_get_index;
                struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];

                if (skb) {
                        bytes_compl += skb->len;
                        pkts_compl++;
                }

                mvneta_txq_inc_get(txq);

                if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
                        dma_unmap_single(pp->dev->dev.parent,
                                         tx_desc->buf_phys_addr,
                                         tx_desc->data_size, DMA_TO_DEVICE);
                if (!skb)
                        continue;
                dev_kfree_skb_any(skb);
        }

        netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
/* Handle end of transmission */
static void mvneta_txq_done(struct mvneta_port *pp,
                            struct mvneta_tx_queue *txq)
{
        struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
        int tx_done;

        tx_done = mvneta_txq_sent_desc_proc(pp, txq);
        if (!tx_done)
                return;

        mvneta_txq_bufs_free(pp, txq, tx_done, nq);

        txq->count -= tx_done;

        if (netif_tx_queue_stopped(nq)) {
                if (txq->count <= txq->tx_wake_threshold)
                        netif_tx_wake_queue(nq);
        }
}
void *mvneta_frag_alloc(unsigned int frag_size)
{
        if (likely(frag_size <= PAGE_SIZE))
                return netdev_alloc_frag(frag_size);
        else
                return kmalloc(frag_size, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(mvneta_frag_alloc);

void mvneta_frag_free(unsigned int frag_size, void *data)
{
        if (likely(frag_size <= PAGE_SIZE))
                skb_free_frag(data);
        else
                kfree(data);
}
EXPORT_SYMBOL_GPL(mvneta_frag_free);
/* Refill processing for SW buffer management */
static int mvneta_rx_refill(struct mvneta_port *pp,
                            struct mvneta_rx_desc *rx_desc,
                            struct mvneta_rx_queue *rxq)
{
        dma_addr_t phys_addr;
        void *data;

        data = mvneta_frag_alloc(pp->frag_size);
        if (!data)
                return -ENOMEM;

        phys_addr = dma_map_single(pp->dev->dev.parent, data,
                                   MVNETA_RX_BUF_SIZE(pp->pkt_size),
                                   DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
                mvneta_frag_free(pp->frag_size, data);
                return -ENOMEM;
        }

        phys_addr += pp->rx_offset_correction;
        mvneta_rx_desc_fill(rx_desc, phys_addr, data, rxq);
        return 0;
}
/* Handle tx checksum */
static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
{
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                int ip_hdr_len = 0;
                __be16 l3_proto = vlan_get_protocol(skb);
                u8 l4_proto;

                if (l3_proto == htons(ETH_P_IP)) {
                        struct iphdr *ip4h = ip_hdr(skb);

                        /* Calculate IPv4 checksum and L4 checksum */
                        ip_hdr_len = ip4h->ihl;
                        l4_proto = ip4h->protocol;
                } else if (l3_proto == htons(ETH_P_IPV6)) {
                        struct ipv6hdr *ip6h = ipv6_hdr(skb);

                        /* Read l4_protocol from one of IPv6 extra headers */
                        if (skb_network_header_len(skb) > 0)
                                ip_hdr_len = (skb_network_header_len(skb) >> 2);
                        l4_proto = ip6h->nexthdr;
                } else {
                        return MVNETA_TX_L4_CSUM_NOT;
                }
                return mvneta_txq_desc_csum(skb_network_offset(skb),
                                            l3_proto, ip_hdr_len, l4_proto);
        }

        return MVNETA_TX_L4_CSUM_NOT;
}
/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
                                 struct mvneta_rx_queue *rxq)
{
        int rx_done, i;

        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
        if (rx_done)
                mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

        if (pp->bm_priv) {
                for (i = 0; i < rx_done; i++) {
                        struct mvneta_rx_desc *rx_desc =
                                mvneta_rxq_next_desc_get(rxq);
                        u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
                        struct mvneta_bm_pool *bm_pool;

                        bm_pool = &pp->bm_priv->bm_pools[pool_id];
                        /* Return dropped buffer to the pool */
                        mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
                                              rx_desc->buf_phys_addr);
                }
                return;
        }

        for (i = 0; i < rxq->size; i++) {
                struct mvneta_rx_desc *rx_desc = rxq->descs + i;
                void *data = rxq->buf_virt_addr[i];

                dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
                                 MVNETA_RX_BUF_SIZE(pp->pkt_size),
                                 DMA_FROM_DEVICE);
                mvneta_frag_free(pp->frag_size, data);
        }
}
/* Main rx processing when using software buffer management */
static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
                          struct mvneta_rx_queue *rxq)
{
        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
        struct net_device *dev = pp->dev;
        int rx_done;
        u32 rcvd_pkts = 0;
        u32 rcvd_bytes = 0;

        /* Get number of received packets */
        rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

        if (rx_todo > rx_done)
                rx_todo = rx_done;

        rx_done = 0;

        /* Fairness NAPI loop */
        while (rx_done < rx_todo) {
                struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
                struct sk_buff *skb;
                unsigned char *data;
                dma_addr_t phys_addr;
                u32 rx_status, frag_size;
                int rx_bytes, err, index;

                rx_done++;
                rx_status = rx_desc->status;
                rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
                index = rx_desc - rxq->descs;
                data = rxq->buf_virt_addr[index];
                phys_addr = rx_desc->buf_phys_addr;

                if (!mvneta_rxq_desc_is_first_last(rx_status) ||
                    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
                        mvneta_rx_error(pp, rx_desc);
err_drop_frame:
                        dev->stats.rx_errors++;
                        /* leave the descriptor untouched */
                        continue;
                }

                if (rx_bytes <= rx_copybreak) {
                        /* better copy a small frame and not unmap the DMA region */
                        skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
                        if (unlikely(!skb))
                                goto err_drop_frame;

                        dma_sync_single_range_for_cpu(dev->dev.parent,
                                                      phys_addr,
                                                      MVNETA_MH_SIZE + NET_SKB_PAD,
                                                      rx_bytes,
                                                      DMA_FROM_DEVICE);
                        skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
                                     rx_bytes);

                        skb->protocol = eth_type_trans(skb, dev);
                        mvneta_rx_csum(pp, rx_status, skb);
                        napi_gro_receive(&port->napi, skb);

                        rcvd_pkts++;
                        rcvd_bytes += rx_bytes;

                        /* leave the descriptor and buffer untouched */
                        continue;
                }

                /* Refill processing */
                err = mvneta_rx_refill(pp, rx_desc, rxq);
                if (err) {
                        netdev_err(dev, "Linux processing - Can't refill\n");
                        rxq->missed++;
                        goto err_drop_frame;
                }

                frag_size = pp->frag_size;

                skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

                /* After refill old buffer has to be unmapped regardless
                 * the skb is successfully built or not.
                 */
                dma_unmap_single(dev->dev.parent, phys_addr,
                                 MVNETA_RX_BUF_SIZE(pp->pkt_size),
                                 DMA_FROM_DEVICE);

                if (!skb)
                        goto err_drop_frame;

                rcvd_pkts++;
                rcvd_bytes += rx_bytes;

                /* Linux processing */
                skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
                skb_put(skb, rx_bytes);

                skb->protocol = eth_type_trans(skb, dev);

                mvneta_rx_csum(pp, rx_status, skb);

                napi_gro_receive(&port->napi, skb);
        }

        if (rcvd_pkts) {
                struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

                u64_stats_update_begin(&stats->syncp);
                stats->rx_packets += rcvd_pkts;
                stats->rx_bytes   += rcvd_bytes;
                u64_stats_update_end(&stats->syncp);
        }

        /* Update rxq management counters */
        mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

        return rx_done;
}
/* Main rx processing when using hardware buffer management */
static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
			  struct mvneta_rx_queue *rxq)
{
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	struct net_device *dev = pp->dev;
	int rx_done;
	u32 rcvd_pkts = 0;
	u32 rcvd_bytes = 0;

	/* Get number of received packets */
	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);

	if (rx_todo > rx_done)
		rx_todo = rx_done;

	rx_done = 0;

	/* Fairness NAPI loop */
	while (rx_done < rx_todo) {
		struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
		struct mvneta_bm_pool *bm_pool = NULL;
		struct sk_buff *skb;
		unsigned char *data;
		dma_addr_t phys_addr;
		u32 rx_status, frag_size;
		int rx_bytes, err;
		u8 pool_id;

		rx_done++;
		rx_status = rx_desc->status;
		rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
		data = (u8 *)(uintptr_t)rx_desc->buf_cookie;
		phys_addr = rx_desc->buf_phys_addr;
		pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc);
		bm_pool = &pp->bm_priv->bm_pools[pool_id];

		if (!mvneta_rxq_desc_is_first_last(rx_status) ||
		    (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
err_drop_frame_ret_pool:
			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);
err_drop_frame:
			dev->stats.rx_errors++;
			mvneta_rx_error(pp, rx_desc);
			/* leave the descriptor untouched */
			continue;
		}

		if (rx_bytes <= rx_copybreak) {
			/* better copy a small frame and not unmap the DMA region */
			skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
			if (unlikely(!skb))
				goto err_drop_frame_ret_pool;

			dma_sync_single_range_for_cpu(dev->dev.parent,
						      rx_desc->buf_phys_addr,
						      MVNETA_MH_SIZE + NET_SKB_PAD,
						      rx_bytes,
						      DMA_FROM_DEVICE);
			skb_put_data(skb, data + MVNETA_MH_SIZE + NET_SKB_PAD,
				     rx_bytes);

			skb->protocol = eth_type_trans(skb, dev);
			mvneta_rx_csum(pp, rx_status, skb);
			napi_gro_receive(&port->napi, skb);

			rcvd_pkts++;
			rcvd_bytes += rx_bytes;

			/* Return the buffer to the pool */
			mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool,
					      rx_desc->buf_phys_addr);

			/* leave the descriptor and buffer untouched */
			continue;
		}

		/* Refill processing */
		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
		if (err) {
			netdev_err(dev, "Linux processing - Can't refill\n");
			rxq->missed++;
			goto err_drop_frame_ret_pool;
		}

		frag_size = bm_pool->hwbm_pool.frag_size;

		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);

		/* After refill old buffer has to be unmapped regardless
		 * the skb is successfully built or not.
		 */
		dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr,
				 bm_pool->buf_size, DMA_FROM_DEVICE);
		if (!skb)
			goto err_drop_frame;

		rcvd_pkts++;
		rcvd_bytes += rx_bytes;

		/* Linux processing */
		skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
		skb_put(skb, rx_bytes);

		skb->protocol = eth_type_trans(skb, dev);

		mvneta_rx_csum(pp, rx_status, skb);

		napi_gro_receive(&port->napi, skb);
	}

	if (rcvd_pkts) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets += rcvd_pkts;
		stats->rx_bytes   += rcvd_bytes;
		u64_stats_update_end(&stats->syncp);
	}

	/* Update rxq management counters */
	mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);

	return rx_done;
}
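/* Note on the HWBM path above: the CPU virtual address travels in
 * rx_desc->buf_cookie and the owning pool id in the descriptor status
 * (MVNETA_RX_GET_BM_POOL_ID), so every early exit must hand the buffer
 * back with mvneta_bm_pool_put_bp() or the hardware pool slowly drains.
 */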
static inline void
mvneta_tso_put_hdr(struct sk_buff *skb,
		   struct mvneta_port *pp, struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

	txq->tx_skb[txq->txq_put_index] = NULL;
	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = hdr_len;
	tx_desc->command = mvneta_skb_tx_csum(pp, skb);
	tx_desc->command |= MVNETA_TXD_F_DESC;
	tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
				 txq->txq_put_index * TSO_HEADER_SIZE;
	mvneta_txq_inc_put(txq);
}
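/* Note on the addressing above (illustrative): tso_hdrs is one coherent
 * DMA allocation of txq->size slots of TSO_HEADER_SIZE bytes, so slot
 * i's CPU address is tso_hdrs + i * TSO_HEADER_SIZE and its bus address
 * tso_hdrs_phys + i * TSO_HEADER_SIZE; no per-packet dma_map_single()
 * is needed for headers.
 */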
static inline int
mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
		    struct sk_buff *skb, char *data, int size,
		    bool last_tcp, bool is_last)
{
	struct mvneta_tx_desc *tx_desc;

	tx_desc = mvneta_txq_next_desc_get(txq);
	tx_desc->data_size = size;
	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
						size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		return -ENOMEM;
	}

	tx_desc->command = 0;
	txq->tx_skb[txq->txq_put_index] = NULL;

	if (last_tcp) {
		/* last descriptor in the TCP packet */
		tx_desc->command = MVNETA_TXD_L_DESC;

		/* last descriptor in SKB */
		if (is_last)
			txq->tx_skb[txq->txq_put_index] = skb;
	}
	mvneta_txq_inc_put(txq);
	return 0;
}

static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
			 struct mvneta_tx_queue *txq)
{
	int total_len, data_left;
	int desc_count = 0;
	struct mvneta_port *pp = netdev_priv(dev);
	struct tso_t tso;
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int i;

	/* Count needed descriptors */
	if ((txq->count + tso_count_descs(skb)) >= txq->size)
		return 0;

	if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
		pr_info("*** Is this even possible???!?!?\n");
		return 0;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;
		desc_count++;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);

		mvneta_tso_put_hdr(skb, pp, txq);

		while (data_left > 0) {
			int size;

			desc_count++;
			size = min_t(int, tso.size, data_left);

			if (mvneta_tso_put_data(dev, txq, skb,
						tso.data, size,
						size == data_left,
						total_len == 0))
				goto err_release;
			data_left -= size;

			tso_build_data(skb, &tso, size);
		}
	}

	return desc_count;

err_release:
	/* Release all used data descriptors; header descriptors must not
	 * be DMA-unmapped.
	 */
	for (i = desc_count - 1; i >= 0; i--) {
		struct mvneta_tx_desc *tx_desc = txq->descs + i;
		if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
			dma_unmap_single(pp->dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}
	return 0;
}
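/* Worked example (illustrative numbers): a TSO skb carrying 14480
 * payload bytes with gso_size 1448 splits into 10 segments. Each
 * segment costs one header descriptor plus at least one data
 * descriptor, so the tso_count_descs() estimate checked at entry must
 * fit in the ring before any descriptor is touched.
 */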
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
				  struct mvneta_tx_queue *txq)
{
	struct mvneta_tx_desc *tx_desc;
	int i, nr_frags = skb_shinfo(skb)->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		void *addr = page_address(frag->page.p) + frag->page_offset;

		tx_desc = mvneta_txq_next_desc_get(txq);
		tx_desc->data_size = frag->size;

		tx_desc->buf_phys_addr =
			dma_map_single(pp->dev->dev.parent, addr,
				       tx_desc->data_size, DMA_TO_DEVICE);

		if (dma_mapping_error(pp->dev->dev.parent,
				      tx_desc->buf_phys_addr)) {
			mvneta_txq_desc_put(txq);
			goto error;
		}

		if (i == nr_frags - 1) {
			/* Last descriptor */
			tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
			txq->tx_skb[txq->txq_put_index] = skb;
		} else {
			/* Descriptor in the middle: Not First, Not Last */
			tx_desc->command = 0;
			txq->tx_skb[txq->txq_put_index] = NULL;
		}
		mvneta_txq_inc_put(txq);
	}

	return 0;

error:
	/* Release all descriptors that were used to map fragments of
	 * this packet, as well as the corresponding DMA mappings
	 */
	for (i = i - 1; i >= 0; i--) {
		tx_desc = txq->descs + i;
		dma_unmap_single(pp->dev->dev.parent,
				 tx_desc->buf_phys_addr,
				 tx_desc->data_size,
				 DMA_TO_DEVICE);
		mvneta_txq_desc_put(txq);
	}

	return -ENOMEM;
}

/* Main tx processing */
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u16 txq_id = skb_get_queue_mapping(skb);
	struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
	struct mvneta_tx_desc *tx_desc;
	int len = skb->len;
	int frags = 0;
	u32 tx_cmd;

	if (!netif_running(dev))
		goto out;

	if (skb_is_gso(skb)) {
		frags = mvneta_tx_tso(skb, dev, txq);
		goto out;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;

	/* Get a descriptor for the first part of the packet */
	tx_desc = mvneta_txq_next_desc_get(txq);

	tx_cmd = mvneta_skb_tx_csum(pp, skb);

	tx_desc->data_size = skb_headlen(skb);

	tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
						tx_desc->data_size,
						DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev.parent,
				       tx_desc->buf_phys_addr))) {
		mvneta_txq_desc_put(txq);
		frags = 0;
		goto out;
	}

	if (frags == 1) {
		/* First and Last descriptor */
		tx_cmd |= MVNETA_TXD_FLZ_DESC;
		tx_desc->command = tx_cmd;
		txq->tx_skb[txq->txq_put_index] = skb;
		mvneta_txq_inc_put(txq);
	} else {
		/* First but not Last */
		tx_cmd |= MVNETA_TXD_F_DESC;
		txq->tx_skb[txq->txq_put_index] = NULL;
		mvneta_txq_inc_put(txq);
		tx_desc->command = tx_cmd;
		/* Continue with other skb fragments */
		if (mvneta_tx_frag_process(pp, skb, txq)) {
			dma_unmap_single(dev->dev.parent,
					 tx_desc->buf_phys_addr,
					 tx_desc->data_size,
					 DMA_TO_DEVICE);
			mvneta_txq_desc_put(txq);
			frags = 0;
			goto out;
		}
	}

out:
	if (frags > 0) {
		struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);

		netdev_tx_sent_queue(nq, len);

		txq->count += frags;
		if (txq->count >= txq->tx_stop_threshold)
			netif_tx_stop_queue(nq);

		if (!skb->xmit_more || netif_xmit_stopped(nq) ||
		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
			mvneta_txq_pend_desc_add(pp, txq, frags);
		else
			txq->pending += frags;

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes  += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}
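/* Note on the xmit_more handling above: the PEND-descriptor register
 * write is the per-packet doorbell. While the stack signals that more
 * packets are coming (skb->xmit_more), the write is deferred and the
 * count parked in txq->pending; it is flushed early only when the
 * queue stops or the deferred total would no longer fit in the
 * MVNETA_TXQ_DEC_SENT_MASK field of a single register write.
 */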
/* Free tx resources, when resetting a port */
static void mvneta_txq_done_force(struct mvneta_port *pp,
				  struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
	int tx_done = txq->count;

	mvneta_txq_bufs_free(pp, txq, tx_done, nq);

	/* reset txq */
	txq->count = 0;
	txq->txq_put_index = 0;
	txq->txq_get_index = 0;
}

/* Handle tx done - called in softirq context. The <cause_tx_done> argument
 * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
 */
static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
{
	struct mvneta_tx_queue *txq;
	struct netdev_queue *nq;

	while (cause_tx_done) {
		txq = mvneta_tx_done_policy(pp, cause_tx_done);

		nq = netdev_get_tx_queue(pp->dev, txq->id);
		__netif_tx_lock(nq, smp_processor_id());

		if (txq->count)
			mvneta_txq_done(pp, txq);

		__netif_tx_unlock(nq);
		cause_tx_done &= ~((1 << txq->id));
	}
}

/* Compute crc8 of the specified address, using a unique algorithm
 * according to the hw spec, different from the generic crc8 algorithm.
 */
static int mvneta_addr_crc(unsigned char *addr)
{
	int crc = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		int j;

		crc = (crc ^ addr[i]) << 8;
		for (j = 7; j >= 0; j--) {
			if (crc & (0x100 << j))
				crc = crc ^ (0x107 << j);
		}
	}

	return crc;
}
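/* Hypothetical usage sketch (not in the driver): the loop above is a
 * bitwise CRC-8 with polynomial 0x107 (x^8 + x^2 + x + 1), fed
 * MSB-first; the 8-bit result indexes the 256-entry Other Multicast
 * Table.
 */
#if 0
static unsigned char mvneta_addr_crc_example(void)
{
	unsigned char mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x7f, 0x00, 0x01 };

	return mvneta_addr_crc(mc);	/* remainder fits in 8 bits */
}
#endif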
/* This method controls the net device special MAC multicast support.
 * The Special Multicast Table for MAC addresses supports MAC of the form
 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 * Table entries in the DA-Filter table. This method sets the Special
 * Multicast Table appropriate entry.
 */
static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
					  unsigned char last_byte,
					  int queue)
{
	unsigned int smc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Register offset from SMC table base */
	tbl_offset = (last_byte / 4);
	/* Entry offset within the above reg */
	reg_offset = last_byte % 4;

	smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
					+ tbl_offset * 4));

	if (queue == -1)
		smc_table_reg &= ~(0xff << (8 * reg_offset));
	else {
		smc_table_reg &= ~(0xff << (8 * reg_offset));
		smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
		    smc_table_reg);
}

/* This method controls the network device Other MAC multicast support.
 * The Other Multicast Table is used for multicast of another type.
 * A CRC-8 is used as an index to the Other Multicast Table entries
 * in the DA-Filter table.
 * The method gets the CRC-8 value from the calling routine and
 * sets the Other Multicast Table appropriate entry according to the
 * specified CRC-8.
 */
static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
					unsigned char crc8,
					int queue)
{
	unsigned int omc_table_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
	reg_offset = crc8 % 4;	     /* Entry offset within the above reg */

	omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);

	if (queue == -1) {
		/* Clear accepts frame bit at specified Other DA table entry */
		omc_table_reg &= ~(0xff << (8 * reg_offset));
	} else {
		omc_table_reg &= ~(0xff << (8 * reg_offset));
		omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
}
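/* Worked example for the offsets above: crc8 = 0x9b (155) gives
 * tbl_offset = (155 / 4) * 4 = 152 (the byte offset of the 32-bit
 * register holding entries 152..155) and reg_offset = 155 % 4 = 3 (the
 * top byte lane of that register). Each entry byte is 0x01 (accept)
 * OR'ed with queue << 1.
 */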
/* The network device supports multicast using two tables:
 *    1) Special Multicast Table for MAC addresses of the form
 *       0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
 *       The MAC DA[7:0] bits are used as a pointer to the Special Multicast
 *       Table entries in the DA-Filter table.
 *    2) Other Multicast Table for multicast of another type. A CRC-8 value
 *       is used as an index to the Other Multicast Table entries in the
 *       DA-Filter table.
 */
static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
				 int queue)
{
	unsigned char crc_result = 0;

	if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
		mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
		return 0;
	}

	crc_result = mvneta_addr_crc(p_addr);
	if (queue == -1) {
		if (pp->mcast_count[crc_result] == 0) {
			netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
				    crc_result);
			return -EINVAL;
		}

		pp->mcast_count[crc_result]--;
		if (pp->mcast_count[crc_result] != 0) {
			netdev_info(pp->dev,
				    "After delete there are %d valid Mcast for crc8=0x%02x\n",
				    pp->mcast_count[crc_result], crc_result);
			return -EINVAL;
		}
	} else
		pp->mcast_count[crc_result]++;

	mvneta_set_other_mcast_addr(pp, crc_result, queue);

	return 0;
}

/* Configure Filtering mode of Ethernet port */
static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
					  int is_promisc)
{
	u32 port_cfg_reg, val;

	port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);

	val = mvreg_read(pp, MVNETA_TYPE_PRIO);

	/* Set / Clear UPM bit in port configuration register */
	if (is_promisc) {
		/* Accept all Unicast addresses */
		port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
		val |= MVNETA_FORCE_UNI;
		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
	} else {
		/* Reject all Unicast addresses */
		port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
		val &= ~MVNETA_FORCE_UNI;
	}

	mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
	mvreg_write(pp, MVNETA_TYPE_PRIO, val);
}
/* register unicast and multicast addresses */
static void mvneta_set_rx_mode(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct netdev_hw_addr *ha;

	if (dev->flags & IFF_PROMISC) {
		/* Accept all: Multicast + Unicast */
		mvneta_rx_unicast_promisc_set(pp, 1);
		mvneta_set_ucast_table(pp, pp->rxq_def);
		mvneta_set_special_mcast_table(pp, pp->rxq_def);
		mvneta_set_other_mcast_table(pp, pp->rxq_def);
	} else {
		/* Accept single Unicast */
		mvneta_rx_unicast_promisc_set(pp, 0);
		mvneta_set_ucast_table(pp, -1);
		mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

		if (dev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			mvneta_set_special_mcast_table(pp, pp->rxq_def);
			mvneta_set_other_mcast_table(pp, pp->rxq_def);
		} else {
			/* Accept only initialized multicast */
			mvneta_set_special_mcast_table(pp, -1);
			mvneta_set_other_mcast_table(pp, -1);

			if (!netdev_mc_empty(dev)) {
				netdev_for_each_mc_addr(ha, dev) {
					mvneta_mcast_addr_set(pp, ha->addr,
							      pp->rxq_def);
				}
			}
		}
	}
}

/* Interrupt handling - the callback for request_irq() */
static irqreturn_t mvneta_isr(int irq, void *dev_id)
{
	struct mvneta_port *pp = (struct mvneta_port *)dev_id;

	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	napi_schedule(&pp->napi);

	return IRQ_HANDLED;
}

/* Interrupt handling - the callback for request_percpu_irq() */
static irqreturn_t mvneta_percpu_isr(int irq, void *dev_id)
{
	struct mvneta_pcpu_port *port = (struct mvneta_pcpu_port *)dev_id;

	disable_percpu_irq(port->pp->dev->irq);
	napi_schedule(&port->napi);

	return IRQ_HANDLED;
}

static void mvneta_link_change(struct mvneta_port *pp)
{
	u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
/* NAPI handler:
 * Bits 0-7 of the causeRxTx register indicate that packets were
 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
 * Bits 8-15 indicate that packets were received on the corresponding
 * RXQ (Bit 8 is for RX queue 0).
 * Each CPU has its own causeRxTx register.
 */
static int mvneta_poll(struct napi_struct *napi, int budget)
{
	int rx_done = 0;
	u32 cause_rx_tx;
	int rx_queue;
	struct mvneta_port *pp = netdev_priv(napi->dev);
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);

	if (!netif_running(pp->dev)) {
		napi_complete(napi);
		return rx_done;
	}

	/* Read cause register */
	cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE);
	if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) {
		u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE);

		mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

		if (cause_misc & (MVNETA_CAUSE_PHY_STATUS_CHANGE |
				  MVNETA_CAUSE_LINK_CHANGE))
			mvneta_link_change(pp);
	}

	/* Release Tx descriptors */
	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
	}

	/* For the case where the last mvneta_poll did not process all
	 * RX packets
	 */
	rx_queue = fls(((cause_rx_tx >> 8) & 0xff));

	cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
		port->cause_rx_tx;

	if (rx_queue) {
		rx_queue = rx_queue - 1;
		if (pp->bm_priv)
			rx_done = mvneta_rx_hwbm(pp, budget,
						 &pp->rxqs[rx_queue]);
		else
			rx_done = mvneta_rx_swbm(pp, budget,
						 &pp->rxqs[rx_queue]);
	}

	if (rx_done < budget) {
		cause_rx_tx = 0;
		napi_complete_done(napi, rx_done);

		if (pp->neta_armada3700) {
			unsigned long flags;

			local_irq_save(flags);
			mvreg_write(pp, MVNETA_INTR_NEW_MASK,
				    MVNETA_RX_INTR_MASK(rxq_number) |
				    MVNETA_TX_INTR_MASK(txq_number) |
				    MVNETA_MISCINTR_INTR_MASK);
			local_irq_restore(flags);
		} else {
			enable_percpu_irq(pp->dev->irq, 0);
		}
	}

	if (pp->neta_armada3700)
		pp->cause_rx_tx = cause_rx_tx;
	else
		port->cause_rx_tx = cause_rx_tx;

	return rx_done;
}
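/* Worked example for the rx_queue selection above: if cause bits 8..15
 * read back as 0x05 (RXQs 0 and 2 pending), fls(0x05) = 3, so
 * rx_queue = 3 - 1 = 2 and the highest-numbered pending queue is
 * serviced first; queue 0 is picked up on a later poll via the saved
 * cause_rx_tx.
 */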
/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
		if (mvneta_rx_refill(pp, rxq->descs + i, rxq) != 0) {
			netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
				   __func__, rxq->id, i, num);
			break;
		}
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return i;
}

/* Free all packets pending transmit from all TXQs and reset TX port */
static void mvneta_tx_reset(struct mvneta_port *pp)
{
	int queue;

	/* free the skb's in the tx ring */
	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_done_force(pp, &pp->txqs[queue]);

	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
}

static void mvneta_rx_reset(struct mvneta_port *pp)
{
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
}
/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
					&rxq->descs_phys, GFP_KERNEL);
	if (!rxq->descs)
		return -ENOMEM;

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Set Offset */
	mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD - pp->rx_offset_correction);

	/* Set coalescing pkts and time */
	mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
	mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);

	if (!pp->bm_priv) {
		/* Fill RXQ with buffers from RX pool */
		mvneta_rxq_buf_size_set(pp, rxq,
					MVNETA_RX_BUF_SIZE(pp->pkt_size));
		mvneta_rxq_bm_disable(pp, rxq);
		mvneta_rxq_fill(pp, rxq, rxq->size);
	} else {
		mvneta_rxq_bm_enable(pp, rxq);
		mvneta_rxq_long_pool_set(pp, rxq);
		mvneta_rxq_short_pool_set(pp, rxq);
		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
	}

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	if (rxq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  rxq->size * MVNETA_DESC_ALIGNED_SIZE,
				  rxq->descs, rxq->descs_phys);

	rxq->descs             = NULL;
	rxq->last_desc         = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys        = 0;
}
/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	int cpu;

	txq->size = pp->tx_ring_size;

	/* A queue must always have room for at least one skb.
	 * Therefore, stop the queue when the free entries reaches
	 * the maximum number of descriptors per skb.
	 */
	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;

	/* Allocate memory for TX descriptors */
	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
					txq->size * MVNETA_DESC_ALIGNED_SIZE,
					&txq->descs_phys, GFP_KERNEL);
	if (!txq->descs)
		return -ENOMEM;

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
				    GFP_KERNEL);
	if (!txq->tx_skb) {
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}

	/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
	txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
					   txq->size * TSO_HEADER_SIZE,
					   &txq->tso_hdrs_phys, GFP_KERNEL);
	if (!txq->tso_hdrs) {
		kfree(txq->tx_skb);
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);
		return -ENOMEM;
	}
	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

	/* Setup XPS mapping */
	if (txq_number > 1)
		cpu = txq->id % num_present_cpus();
	else
		cpu = pp->rxq_def % num_present_cpus();
	cpumask_set_cpu(cpu, &txq->affinity_mask);
	netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);

	return 0;
}
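/* Worked example for the thresholds above (illustrative ring size):
 * with tx_ring_size = 532 the queue is stopped once the free entries
 * could no longer hold one worst-case skb (i.e. at
 * 532 - MVNETA_MAX_SKB_DESCS used descriptors) and woken again only at
 * half that occupancy, giving hysteresis so the queue does not flap on
 * every completed descriptor.
 */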
/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);

	kfree(txq->tx_skb);

	if (txq->tso_hdrs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * TSO_HEADER_SIZE,
				  txq->tso_hdrs, txq->tso_hdrs_phys);
	if (txq->descs)
		dma_free_coherent(pp->dev->dev.parent,
				  txq->size * MVNETA_DESC_ALIGNED_SIZE,
				  txq->descs, txq->descs_phys);

	netdev_tx_reset_queue(nq);

	txq->descs             = NULL;
	txq->last_desc         = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys        = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}
/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);

		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}
static void mvneta_start_dev(struct mvneta_port *pp)
{
	int cpu;

	mvneta_max_rx_size_set(pp, pp->pkt_size);
	mvneta_txq_max_tx_size_set(pp, pp->pkt_size);

	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);

	if (!pp->neta_armada3700) {
		/* Enable polling on the port */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_enable(&port->napi);
		}
	} else {
		napi_enable(&pp->napi);
	}

	/* Unmask interrupts. It has to be done from each CPU */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);

	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);

	phylink_start(pp->phylink);
	netif_tx_start_all_queues(pp->dev);
}

static void mvneta_stop_dev(struct mvneta_port *pp)
{
	unsigned int cpu;

	phylink_stop(pp->phylink);

	if (!pp->neta_armada3700) {
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			napi_disable(&port->napi);
		}
	} else {
		napi_disable(&pp->napi);
	}

	netif_carrier_off(pp->dev);

	mvneta_port_down(pp);
	netif_tx_stop_all_queues(pp->dev);

	/* Stop the port activity */
	mvneta_port_disable(pp);

	/* Clear all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	mvneta_tx_reset(pp);
	mvneta_rx_reset(pp);
}

static void mvneta_percpu_enable(void *arg)
{
	struct mvneta_port *pp = arg;

	enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
}

static void mvneta_percpu_disable(void *arg)
{
	struct mvneta_port *pp = arg;

	disable_percpu_irq(pp->dev->irq);
}
/* Change the device mtu */
static int mvneta_change_mtu(struct net_device *dev, int mtu)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
			    mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
		mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
	}

	dev->mtu = mtu;

	if (!netif_running(dev)) {
		if (pp->bm_priv)
			mvneta_bm_update_mtu(pp, mtu);

		netdev_update_features(dev);
		return 0;
	}

	/* The interface is running, so we have to force a
	 * reallocation of the queues
	 */
	mvneta_stop_dev(pp);
	on_each_cpu(mvneta_percpu_disable, pp, true);

	mvneta_cleanup_txqs(pp);
	mvneta_cleanup_rxqs(pp);

	if (pp->bm_priv)
		mvneta_bm_update_mtu(pp, mtu);

	pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup rxqs after MTU change\n");
		return ret;
	}

	ret = mvneta_setup_txqs(pp);
	if (ret) {
		netdev_err(dev, "unable to setup txqs after MTU change\n");
		return ret;
	}

	on_each_cpu(mvneta_percpu_enable, pp, true);
	mvneta_start_dev(pp);

	netdev_update_features(dev);

	return 0;
}
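/* Arithmetic note for the rounding above: ALIGN(x, 8) rounds up to the
 * next multiple of 8, e.g. ALIGN(1517, 8) = 1520, so the value stored
 * is always a DMA-friendly 8-byte-aligned RX packet size.
 */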
static netdev_features_t mvneta_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
		netdev_info(dev,
			    "Disable IP checksum for MTU greater than %dB\n",
			    pp->tx_csum_limit);
	}

	return features;
}

/* Get mac address */
static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
{
	u32 mac_addr_l, mac_addr_h;

	mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
	mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
	addr[0] = (mac_addr_h >> 24) & 0xFF;
	addr[1] = (mac_addr_h >> 16) & 0xFF;
	addr[2] = (mac_addr_h >> 8) & 0xFF;
	addr[3] = mac_addr_h & 0xFF;
	addr[4] = (mac_addr_l >> 8) & 0xFF;
	addr[5] = mac_addr_l & 0xFF;
}
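/* Layout example for the two registers above: for 00:11:22:33:44:55,
 * MVNETA_MAC_ADDR_HIGH holds 0x00112233 (bytes 0..3, MSB first) and
 * the low 16 bits of MVNETA_MAC_ADDR_LOW hold 0x4455 (bytes 4..5).
 */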
/* Handle setting mac address */
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
	struct mvneta_port *pp = netdev_priv(dev);
	struct sockaddr *sockaddr = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;
	/* Remove previous address table entry */
	mvneta_mac_addr_set(pp, dev->dev_addr, -1);

	/* Set new addr in hw */
	mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

	eth_commit_mac_addr_change(dev, addr);
	return 0;
}

static void mvneta_validate(struct net_device *ndev, unsigned long *supported,
			    struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	/* We only support QSGMII, SGMII, 802.3z and RGMII modes */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_QSGMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_8023z(state->interface) &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* Allow all the expected bits */
	phylink_set(mask, Autoneg);
	phylink_set_port_modes(mask);

	/* Asymmetric pause is unsupported */
	phylink_set(mask, Pause);
	/* Half-duplex at speeds higher than 100Mbit is unsupported */
	phylink_set(mask, 1000baseT_Full);
	phylink_set(mask, 1000baseX_Full);

	if (!phy_interface_mode_is_8023z(state->interface)) {
		/* 10M and 100M are only supported in non-802.3z mode */
		phylink_set(mask, 10baseT_Half);
		phylink_set(mask, 10baseT_Full);
		phylink_set(mask, 100baseT_Half);
		phylink_set(mask, 100baseT_Full);
	}

	bitmap_and(supported, supported, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static int mvneta_mac_link_state(struct net_device *ndev,
				 struct phylink_link_state *state)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_stat;

	gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);

	if (gmac_stat & MVNETA_GMAC_SPEED_1000)
		state->speed = SPEED_1000;
	else if (gmac_stat & MVNETA_GMAC_SPEED_100)
		state->speed = SPEED_100;
	else
		state->speed = SPEED_10;

	state->an_complete = !!(gmac_stat & MVNETA_GMAC_AN_COMPLETE);
	state->link = !!(gmac_stat & MVNETA_GMAC_LINK_UP);
	state->duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX);

	state->pause = 0;
	if (gmac_stat & MVNETA_GMAC_RX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_RX;
	if (gmac_stat & MVNETA_GMAC_TX_FLOW_CTRL_ENABLE)
		state->pause |= MLO_PAUSE_TX;

	return 1;
}

static void mvneta_mac_an_restart(struct net_device *ndev)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an | MVNETA_GMAC_INBAND_RESTART_AN);
	mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
		    gmac_an & ~MVNETA_GMAC_INBAND_RESTART_AN);
}
static void mvneta_mac_config(struct net_device *ndev, unsigned int mode,
			      const struct phylink_link_state *state)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
	u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
	u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

	new_ctrl0 = gmac_ctrl0 & ~MVNETA_GMAC0_PORT_1000BASE_X;
	new_ctrl2 = gmac_ctrl2 & ~(MVNETA_GMAC2_INBAND_AN_ENABLE |
				   MVNETA_GMAC2_PORT_RESET);
	new_clk = gmac_clk & ~MVNETA_GMAC_1MS_CLOCK_ENABLE;
	new_an = gmac_an & ~(MVNETA_GMAC_INBAND_AN_ENABLE |
			     MVNETA_GMAC_INBAND_RESTART_AN |
			     MVNETA_GMAC_CONFIG_MII_SPEED |
			     MVNETA_GMAC_CONFIG_GMII_SPEED |
			     MVNETA_GMAC_AN_SPEED_EN |
			     MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL |
			     MVNETA_GMAC_CONFIG_FLOW_CTRL |
			     MVNETA_GMAC_AN_FLOW_CTRL_EN |
			     MVNETA_GMAC_CONFIG_FULL_DUPLEX |
			     MVNETA_GMAC_AN_DUPLEX_EN);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	new_ctrl2 |= MVNETA_GMAC2_PORT_RGMII;

	if (state->interface == PHY_INTERFACE_MODE_QSGMII ||
	    state->interface == PHY_INTERFACE_MODE_SGMII ||
	    phy_interface_mode_is_8023z(state->interface))
		new_ctrl2 |= MVNETA_GMAC2_PCS_ENABLE;

	if (phylink_test(state->advertising, Pause))
		new_an |= MVNETA_GMAC_ADVERT_SYM_FLOW_CTRL;
	if (state->pause & MLO_PAUSE_TXRX_MASK)
		new_an |= MVNETA_GMAC_CONFIG_FLOW_CTRL;

	if (!phylink_autoneg_inband(mode)) {
		/* Phy or fixed speed */
		if (state->duplex)
			new_an |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (state->speed == SPEED_1000)
			new_an |= MVNETA_GMAC_CONFIG_GMII_SPEED;
		else if (state->speed == SPEED_100)
			new_an |= MVNETA_GMAC_CONFIG_MII_SPEED;
	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII mode receives the state from the PHY */
		new_ctrl2 |= MVNETA_GMAC2_INBAND_AN_ENABLE;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN;
	} else {
		/* 802.3z negotiation - only 1000base-X */
		new_ctrl0 |= MVNETA_GMAC0_PORT_1000BASE_X;
		new_clk |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
		new_an = (new_an & ~(MVNETA_GMAC_FORCE_LINK_DOWN |
				     MVNETA_GMAC_FORCE_LINK_PASS)) |
			 MVNETA_GMAC_INBAND_AN_ENABLE |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 /* The MAC only supports FD mode */
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (state->pause & MLO_PAUSE_AN && state->an_enabled)
			new_an |= MVNETA_GMAC_AN_FLOW_CTRL_EN;
	}

	/* Armada 370 documentation says we can only change the port mode
	 * and in-band enable when the link is down, so force it down
	 * while making these changes. We also do this for GMAC_CTRL2
	 */
	if ((new_ctrl0 ^ gmac_ctrl0) & MVNETA_GMAC0_PORT_1000BASE_X ||
	    (new_ctrl2 ^ gmac_ctrl2) & MVNETA_GMAC2_INBAND_AN_ENABLE ||
	    (new_an ^ gmac_an) & MVNETA_GMAC_INBAND_AN_ENABLE) {
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
			    (gmac_an & ~MVNETA_GMAC_FORCE_LINK_PASS) |
			    MVNETA_GMAC_FORCE_LINK_DOWN);
	}

	if (new_ctrl0 != gmac_ctrl0)
		mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
	if (new_ctrl2 != gmac_ctrl2)
		mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
	if (new_clk != gmac_clk)
		mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
	if (new_an != gmac_an)
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);

	if (gmac_ctrl2 & MVNETA_GMAC2_PORT_RESET) {
		while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
			MVNETA_GMAC2_PORT_RESET) != 0)
			continue;
	}
}
static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
{
	u32 lpi_ctl1;

	lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
	if (enable)
		lpi_ctl1 |= MVNETA_LPI_REQUEST_ENABLE;
	else
		lpi_ctl1 &= ~MVNETA_LPI_REQUEST_ENABLE;
	mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
}

static void mvneta_mac_link_down(struct net_device *ndev, unsigned int mode)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	mvneta_port_down(pp);

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_PASS;
		val |= MVNETA_GMAC_FORCE_LINK_DOWN;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	pp->eee_active = false;
	mvneta_set_eee(pp, false);
}

static void mvneta_mac_link_up(struct net_device *ndev, unsigned int mode,
			       struct phy_device *phy)
{
	struct mvneta_port *pp = netdev_priv(ndev);
	u32 val;

	if (!phylink_autoneg_inband(mode)) {
		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~MVNETA_GMAC_FORCE_LINK_DOWN;
		val |= MVNETA_GMAC_FORCE_LINK_PASS;
		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
	}

	mvneta_port_up(pp);

	if (phy && pp->eee_enabled) {
		pp->eee_active = phy_init_eee(phy, 0) >= 0;
		mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
	}
}

static const struct phylink_mac_ops mvneta_phylink_ops = {
	.validate = mvneta_validate,
	.mac_link_state = mvneta_mac_link_state,
	.mac_an_restart = mvneta_mac_an_restart,
	.mac_config = mvneta_mac_config,
	.mac_link_down = mvneta_mac_link_down,
	.mac_link_up = mvneta_mac_link_up,
};

static int mvneta_mdio_probe(struct mvneta_port *pp)
{
	struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
	int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);

	if (err)
		netdev_err(pp->dev, "could not attach PHY: %d\n", err);

	phylink_ethtool_get_wol(pp->phylink, &wol);
	device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);

	return err;
}

static void mvneta_mdio_remove(struct mvneta_port *pp)
{
	phylink_disconnect_phy(pp->phylink);
}
/* Electing a CPU must be done in an atomic way: it should be done
 * after or before the removal/insertion of a CPU and this function is
 * not reentrant.
 */
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
	int elected_cpu = 0, max_cpu, cpu, i = 0;

	/* Use the cpu associated to the rxq when it is online, in all
	 * the other cases, use the cpu 0 which can't be offline.
	 */
	if (cpu_online(pp->rxq_def))
		elected_cpu = pp->rxq_def;

	max_cpu = num_present_cpus();

	for_each_online_cpu(cpu) {
		int rxq_map = 0, txq_map = 0;
		int rxq;

		for (rxq = 0; rxq < rxq_number; rxq++)
			if ((rxq % max_cpu) == cpu)
				rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);

		if (cpu == elected_cpu)
			/* Map the default receive queue to the
			 * elected CPU
			 */
			rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);

		/* We update the TX queue map only if we have one
		 * queue. In this case we associate the TX queue to
		 * the CPU bound to the default RX queue
		 */
		if (txq_number == 1)
			txq_map = (cpu == elected_cpu) ?
				MVNETA_CPU_TXQ_ACCESS(1) : 0;
		else
			txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
				MVNETA_CPU_TXQ_ACCESS_ALL_MASK;

		mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);

		/* Update the interrupt mask on each CPU according the
		 * new mapping
		 */
		smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
					 pp, true);
		i++;
	}
}
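/* Worked example for the mapping above: with rxq_number = 8 and four
 * present CPUs, rxq % max_cpu spreads the queues as CPU0:{0,4},
 * CPU1:{1,5}, CPU2:{2,6}, CPU3:{3,7}; the elected CPU additionally has
 * rxq_def mapped to it, so the default queue always has an online
 * owner.
 */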
static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	int other_cpu;
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	spin_lock(&pp->lock);
	/*
	 * Configuring the driver for a new CPU while the driver is
	 * stopping is racy, so just avoid it.
	 */
	if (pp->is_stopped) {
		spin_unlock(&pp->lock);
		return 0;
	}
	netif_tx_stop_all_queues(pp->dev);

	/*
	 * We have to synchronise on the napi of each CPU except the one
	 * just being woken up
	 */
	for_each_online_cpu(other_cpu) {
		if (other_cpu != cpu) {
			struct mvneta_pcpu_port *other_port =
				per_cpu_ptr(pp->ports, other_cpu);

			napi_synchronize(&other_port->napi);
		}
	}

	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	napi_enable(&port->napi);

	/*
	 * Enable per-CPU interrupts on the CPU that is
	 * brought up.
	 */
	mvneta_percpu_enable(pp);

	/*
	 * Enable per-CPU interrupt on the one CPU we care
	 * about.
	 */
	mvneta_percpu_elect(pp);

	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	spin_unlock(&pp->lock);
	return 0;
}

static int mvneta_cpu_down_prepare(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_online);
	struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);

	/*
	 * Thanks to this lock we are sure that any pending cpu election is
	 * done.
	 */
	spin_lock(&pp->lock);
	/* Mask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
	spin_unlock(&pp->lock);

	napi_synchronize(&port->napi);
	napi_disable(&port->napi);
	/* Disable per-CPU interrupts on the CPU that is brought down. */
	mvneta_percpu_disable(pp);
	return 0;
}

static int mvneta_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
						  node_dead);

	/* Check if a new CPU must be elected now this one is down */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);
	/* Unmask all ethernet port interrupts */
	on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK,
		    MVNETA_CAUSE_PHY_STATUS_CHANGE |
		    MVNETA_CAUSE_LINK_CHANGE);
	netif_tx_start_all_queues(pp->dev);
	return 0;
}
static int mvneta_open(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
	pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		goto err_cleanup_rxqs;

	/* Connect to port interrupt line */
	if (pp->neta_armada3700)
		ret = request_irq(pp->dev->irq, mvneta_isr, 0,
				  dev->name, pp);
	else
		ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
					 dev->name, pp->ports);
	if (ret) {
		netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
		goto err_cleanup_txqs;
	}

	if (!pp->neta_armada3700) {
		/* Enable per-CPU interrupt on all the CPU to handle our RX
		 * queue interrupt
		 */
		on_each_cpu(mvneta_percpu_enable, pp, true);

		pp->is_stopped = false;
		/* Register a CPU notifier to handle the case where our CPU
		 * might be taken offline.
		 */
		ret = cpuhp_state_add_instance_nocalls(online_hpstate,
						       &pp->node_online);
		if (ret)
			goto err_free_irq;

		ret = cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						       &pp->node_dead);
		if (ret)
			goto err_free_online_hp;
	}

	/* In default link is down */
	netif_carrier_off(pp->dev);

	ret = mvneta_mdio_probe(pp);
	if (ret < 0) {
		netdev_err(dev, "cannot probe MDIO bus\n");
		goto err_free_dead_hp;
	}

	mvneta_start_dev(pp);

	return 0;

err_free_dead_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
err_free_online_hp:
	if (!pp->neta_armada3700)
		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
err_free_irq:
	if (pp->neta_armada3700) {
		free_irq(pp->dev->irq, pp);
	} else {
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(pp->dev->irq, pp->ports);
	}
err_cleanup_txqs:
	mvneta_cleanup_txqs(pp);
err_cleanup_rxqs:
	mvneta_cleanup_rxqs(pp);
	return ret;
}

/* Stop the port, free port interrupt line */
static int mvneta_stop(struct net_device *dev)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if (!pp->neta_armada3700) {
		/* Inform that we are stopping so we don't want to setup the
		 * driver for new CPUs in the notifiers. The code of the
		 * notifier for CPU online is protected by the same spinlock,
		 * so when we get the lock, the notifier work is done.
		 */
		spin_lock(&pp->lock);
		pp->is_stopped = true;
		spin_unlock(&pp->lock);

		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);

		cpuhp_state_remove_instance_nocalls(online_hpstate,
						    &pp->node_online);
		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
						    &pp->node_dead);
		on_each_cpu(mvneta_percpu_disable, pp, true);
		free_percpu_irq(dev->irq, pp->ports);
	} else {
		mvneta_stop_dev(pp);
		mvneta_mdio_remove(pp);
		free_irq(dev->irq, pp);
	}

	mvneta_cleanup_rxqs(pp);
	mvneta_cleanup_txqs(pp);

	return 0;
}

static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_mii_ioctl(pp->phylink, ifr, cmd);
}
3748 /* Set link ksettings (phy address, speed) for ethtools */
3750 mvneta_ethtool_set_link_ksettings(struct net_device
*ndev
,
3751 const struct ethtool_link_ksettings
*cmd
)
3753 struct mvneta_port
*pp
= netdev_priv(ndev
);
3755 return phylink_ethtool_ksettings_set(pp
->phylink
, cmd
);
3758 /* Get link ksettings for ethtools */
3760 mvneta_ethtool_get_link_ksettings(struct net_device
*ndev
,
3761 struct ethtool_link_ksettings
*cmd
)
3763 struct mvneta_port
*pp
= netdev_priv(ndev
);
3765 return phylink_ethtool_ksettings_get(pp
->phylink
, cmd
);
3768 static int mvneta_ethtool_nway_reset(struct net_device
*dev
)
3770 struct mvneta_port
*pp
= netdev_priv(dev
);
3772 return phylink_ethtool_nway_reset(pp
->phylink
);
3775 /* Set interrupt coalescing for ethtools */
3776 static int mvneta_ethtool_set_coalesce(struct net_device
*dev
,
3777 struct ethtool_coalesce
*c
)
3779 struct mvneta_port
*pp
= netdev_priv(dev
);
3782 for (queue
= 0; queue
< rxq_number
; queue
++) {
3783 struct mvneta_rx_queue
*rxq
= &pp
->rxqs
[queue
];
3784 rxq
->time_coal
= c
->rx_coalesce_usecs
;
3785 rxq
->pkts_coal
= c
->rx_max_coalesced_frames
;
3786 mvneta_rx_pkts_coal_set(pp
, rxq
, rxq
->pkts_coal
);
3787 mvneta_rx_time_coal_set(pp
, rxq
, rxq
->time_coal
);
3790 for (queue
= 0; queue
< txq_number
; queue
++) {
3791 struct mvneta_tx_queue
*txq
= &pp
->txqs
[queue
];
3792 txq
->done_pkts_coal
= c
->tx_max_coalesced_frames
;
3793 mvneta_tx_done_pkts_coal_set(pp
, txq
, txq
->done_pkts_coal
);
3799 /* get coalescing for ethtools */
3800 static int mvneta_ethtool_get_coalesce(struct net_device
*dev
,
3801 struct ethtool_coalesce
*c
)
3803 struct mvneta_port
*pp
= netdev_priv(dev
);
3805 c
->rx_coalesce_usecs
= pp
->rxqs
[0].time_coal
;
3806 c
->rx_max_coalesced_frames
= pp
->rxqs
[0].pkts_coal
;
3808 c
->tx_max_coalesced_frames
= pp
->txqs
[0].done_pkts_coal
;
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
		sizeof(drvinfo->bus_info));
}

static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
					 struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(netdev);

	ring->rx_max_pending = MVNETA_MAX_RXD;
	ring->tx_max_pending = MVNETA_MAX_TXD;
	ring->rx_pending = pp->rx_ring_size;
	ring->tx_pending = pp->tx_ring_size;
}

static int mvneta_ethtool_set_ringparam(struct net_device *dev,
					struct ethtool_ringparam *ring)
{
	struct mvneta_port *pp = netdev_priv(dev);

	if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
		return -EINVAL;
	pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
		ring->rx_pending : MVNETA_MAX_RXD;

	pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
				   MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
	if (pp->tx_ring_size != ring->tx_pending)
		netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
			    pp->tx_ring_size, ring->tx_pending);

	if (netif_running(dev)) {
		mvneta_stop(dev);
		if (mvneta_open(dev)) {
			netdev_err(dev,
				   "error on opening device after ring param change\n");
			return -ENOMEM;
		}
	}

	return 0;
}
static void mvneta_ethtool_get_pauseparam(struct net_device *dev,
					  struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_pauseparam(pp->phylink, pause);
}

static int mvneta_ethtool_set_pauseparam(struct net_device *dev,
					 struct ethtool_pauseparam *pause)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_set_pauseparam(pp->phylink, pause);
}

static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
				       u8 *data)
{
	if (sset == ETH_SS_STATS) {
		int i;

		for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       mvneta_statistics[i].name, ETH_GSTRING_LEN);
	}
}

static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
{
	const struct mvneta_statistic *s;
	void __iomem *base = pp->base;
	u32 high, low;
	u64 val;
	int i;

	for (i = 0, s = mvneta_statistics;
	     s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
	     s++, i++) {
		val = 0;

		switch (s->type) {
		case T_REG_32:
			val = readl_relaxed(base + s->offset);
			break;
		case T_REG_64:
			/* Docs say to read low 32-bit then high */
			low = readl_relaxed(base + s->offset);
			high = readl_relaxed(base + s->offset + 4);
			val = (u64)high << 32 | low;
			break;
		case T_SW:
			switch (s->offset) {
			case ETHTOOL_STAT_EEE_WAKEUP:
				val = phylink_get_eee_err(pp->phylink);
				break;
			}
			break;
		}

		pp->ethtool_stats[i] += val;
	}
}
static void mvneta_ethtool_get_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int i;

	mvneta_ethtool_update_stats(pp);

	for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
		*data++ = pp->ethtool_stats[i];
}

static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(mvneta_statistics);
	return -EOPNOTSUPP;
}

static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
{
	return MVNETA_RSS_LU_TABLE_SIZE;
}

static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
				    struct ethtool_rxnfc *info,
				    u32 *rules __always_unused)
{
	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = rxq_number;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int mvneta_config_rss(struct mvneta_port *pp)
{
	int cpu;
	u32 val;

	netif_tx_stop_all_queues(pp->dev);

	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_synchronize(&pcpu_port->napi);
		napi_disable(&pcpu_port->napi);
	}

	pp->rxq_def = pp->indir[0];

	/* Update unicast mapping */
	mvneta_set_rx_mode(pp->dev);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	/* Update the elected CPU matching the new rxq_def */
	spin_lock(&pp->lock);
	mvneta_percpu_elect(pp);
	spin_unlock(&pp->lock);

	/* We have to synchronise on the napi of each CPU */
	for_each_online_cpu(cpu) {
		struct mvneta_pcpu_port *pcpu_port =
			per_cpu_ptr(pp->ports, cpu);

		napi_enable(&pcpu_port->napi);
	}

	netif_tx_start_all_queues(pp->dev);

	return 0;
}
static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
				   const u8 *key, const u8 hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	/* We require at least one supported parameter to be changed
	 * and no change in any of the unsupported parameters
	 */
	if (key ||
	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
		return -EOPNOTSUPP;

	if (!indir)
		return 0;

	memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);

	return mvneta_config_rss(pp);
}

static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
				   u8 *hfunc)
{
	struct mvneta_port *pp = netdev_priv(dev);

	/* Current code for Armada 3700 doesn't support RSS features yet */
	if (pp->neta_armada3700)
		return -EOPNOTSUPP;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!indir)
		return 0;

	memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);

	return 0;
}
static void mvneta_ethtool_get_wol(struct net_device *dev,
				   struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);

	phylink_ethtool_get_wol(pp->phylink, wol);
}

static int mvneta_ethtool_set_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wol)
{
	struct mvneta_port *pp = netdev_priv(dev);
	int ret;

	ret = phylink_ethtool_set_wol(pp->phylink, wol);
	if (!ret)
		device_set_wakeup_enable(&dev->dev, !!wol->wolopts);

	return ret;
}

static int mvneta_ethtool_get_module_info(struct net_device *dev,
					  struct ethtool_modinfo *modinfo)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_get_module_info(pp->phylink, modinfo);
}

static int mvneta_ethtool_get_module_eeprom(struct net_device *dev,
					    struct ethtool_eeprom *ee, u8 *buf)
{
	struct mvneta_port *pp = netdev_priv(dev);

	return phylink_ethtool_get_module_eeprom(pp->phylink, ee, buf);
}
static int mvneta_ethtool_get_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);

	eee->eee_enabled = pp->eee_enabled;
	eee->eee_active = pp->eee_active;
	eee->tx_lpi_enabled = pp->tx_lpi_enabled;
	eee->tx_lpi_timer = (lpi_ctl0) >> 8; // * scale;

	return phylink_ethtool_get_eee(pp->phylink, eee);
}

static int mvneta_ethtool_set_eee(struct net_device *dev,
				  struct ethtool_eee *eee)
{
	struct mvneta_port *pp = netdev_priv(dev);
	u32 lpi_ctl0;

	/* The Armada 37x documents do not give limits for this other than
	 * it being an 8-bit register.
	 */
	if (eee->tx_lpi_enabled &&
	    (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255))
		return -EINVAL;

	lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
	lpi_ctl0 &= ~(0xff << 8);
	lpi_ctl0 |= eee->tx_lpi_timer << 8;
	mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);

	pp->eee_enabled = eee->eee_enabled;
	pp->tx_lpi_enabled = eee->tx_lpi_enabled;

	mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);

	return phylink_ethtool_set_eee(pp->phylink, eee);
}
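/* Field note for the read-modify-write above: the LPI timer occupies
 * bits 15:8 of MVNETA_LPI_CTRL_0, so e.g. a requested tx_lpi_timer of
 * 0x20 clears the old byte and ORs in 0x2000; values outside 0..255
 * are rejected before the register is touched.
 */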
static const struct net_device_ops mvneta_netdev_ops = {
	.ndo_open            = mvneta_open,
	.ndo_stop            = mvneta_stop,
	.ndo_start_xmit      = mvneta_tx,
	.ndo_set_rx_mode     = mvneta_set_rx_mode,
	.ndo_set_mac_address = mvneta_set_mac_addr,
	.ndo_change_mtu      = mvneta_change_mtu,
	.ndo_fix_features    = mvneta_fix_features,
	.ndo_get_stats64     = mvneta_get_stats64,
	.ndo_do_ioctl        = mvneta_ioctl,
};

static const struct ethtool_ops mvneta_eth_tool_ops = {
	.nway_reset          = mvneta_ethtool_nway_reset,
	.get_link            = ethtool_op_get_link,
	.set_coalesce        = mvneta_ethtool_set_coalesce,
	.get_coalesce        = mvneta_ethtool_get_coalesce,
	.get_drvinfo         = mvneta_ethtool_get_drvinfo,
	.get_ringparam       = mvneta_ethtool_get_ringparam,
	.set_ringparam       = mvneta_ethtool_set_ringparam,
	.get_pauseparam      = mvneta_ethtool_get_pauseparam,
	.set_pauseparam      = mvneta_ethtool_set_pauseparam,
	.get_strings         = mvneta_ethtool_get_strings,
	.get_ethtool_stats   = mvneta_ethtool_get_stats,
	.get_sset_count      = mvneta_ethtool_get_sset_count,
	.get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
	.get_rxnfc           = mvneta_ethtool_get_rxnfc,
	.get_rxfh            = mvneta_ethtool_get_rxfh,
	.set_rxfh            = mvneta_ethtool_set_rxfh,
	.get_link_ksettings  = mvneta_ethtool_get_link_ksettings,
	.set_link_ksettings  = mvneta_ethtool_set_link_ksettings,
	.get_wol             = mvneta_ethtool_get_wol,
	.set_wol             = mvneta_ethtool_set_wol,
	.get_module_info     = mvneta_ethtool_get_module_info,
	.get_module_eeprom   = mvneta_ethtool_get_module_eeprom,
	.get_eee             = mvneta_ethtool_get_eee,
	.set_eee             = mvneta_ethtool_set_eee,
};
/* Initialize hw */
static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
		txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
	}

	pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
	if (!pp->rxqs)
		return -ENOMEM;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
		rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
		rxq->time_coal = MVNETA_RX_COAL_USEC;
		rxq->buf_virt_addr
			= devm_kmalloc_array(pp->dev->dev.parent,
					     rxq->size,
					     sizeof(*rxq->buf_virt_addr),
					     GFP_KERNEL);
		if (!rxq->buf_virt_addr)
			return -ENOMEM;
	}

	return 0;
}
/* Platform glue: initialize the MBus decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
				     const struct mbus_dram_target_info *dram)
{
	u32 win_enable;
	u32 win_protect;
	int i;

	/* Clear all six windows; only the first four have remap registers */
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	if (dram) {
		/* One window per DRAM chip select, with full R/W access */
		for (i = 0; i < dram->num_cs; i++) {
			const struct mbus_dram_window *cs = dram->cs + i;

			mvreg_write(pp, MVNETA_WIN_BASE(i),
				    (cs->base & 0xffff0000) |
				    (cs->mbus_attr << 8) |
				    dram->mbus_dram_target_id);

			mvreg_write(pp, MVNETA_WIN_SIZE(i),
				    (cs->size - 1) & 0xffff0000);

			win_enable &= ~(1 << i);
			win_protect |= 3 << (2 * i);
		}
	} else {
		/* For Armada3700 open default 4GB Mbus window, leaving
		 * arbitration of target/attribute to a different layer
		 * of configuration.
		 */
		mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
		win_enable &= ~BIT(0);
		win_protect = 3;
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
	mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
}

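/* Worked example of the window encoding above (values illustrative, not
 * taken from a particular board): a chip select at base 0x00000000 with
 * MBus attribute 0x1e and target id 0x0 yields a MVNETA_WIN_BASE value of
 * (0x00000000 & 0xffff0000) | (0x1e << 8) | 0x0 == 0x00001e00, and a 1GB
 * window yields a MVNETA_WIN_SIZE value of
 * (0x40000000 - 1) & 0xffff0000 == 0x3fff0000.
 */
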
/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	/* Select the SERDES protocol matching the PHY interface mode */
	if (phy_mode == PHY_INTERFACE_MODE_QSGMII)
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
	else if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
		 phy_mode == PHY_INTERFACE_MODE_1000BASEX)
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
	else if (!phy_interface_mode_is_rgmii(phy_mode))
		return -EINVAL;

	return 0;
}

/* Device initialization routine */
static int mvneta_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device_node *dn = pdev->dev.of_node;
	struct device_node *bm_node;
	struct mvneta_port *pp;
	struct net_device *dev;
	struct phylink *phylink;
	const char *dt_mac_addr;
	char hw_mac_addr[ETH_ALEN];
	const char *mac_from;
	int tx_csum_limit;
	int phy_mode;
	int err;
	int cpu;

	dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
	if (!dev)
		return -ENOMEM;

	dev->irq = irq_of_parse_and_map(dn, 0);
	if (dev->irq == 0) {
		err = -EINVAL;
		goto err_free_netdev;
	}

	phy_mode = of_get_phy_mode(dn);
	if (phy_mode < 0) {
		dev_err(&pdev->dev, "incorrect phy-mode\n");
		err = -EINVAL;
		goto err_free_irq;
	}

	phylink = phylink_create(dev, pdev->dev.fwnode, phy_mode,
				 &mvneta_phylink_ops);
	if (IS_ERR(phylink)) {
		err = PTR_ERR(phylink);
		goto err_free_irq;
	}

	dev->tx_queue_len = MVNETA_MAX_TXD;
	dev->watchdog_timeo = 5 * HZ;
	dev->netdev_ops = &mvneta_netdev_ops;
	dev->ethtool_ops = &mvneta_eth_tool_ops;

	pp = netdev_priv(dev);
	spin_lock_init(&pp->lock);
	pp->phylink = phylink;
	pp->phy_interface = phy_mode;

	pp->rxq_def = rxq_def;

	/* Set RX packet offset correction for platforms whose NET_SKB_PAD
	 * exceeds 64B. It should be 64B for 64-bit platforms and 0B for
	 * 32-bit ones.
	 */
	pp->rx_offset_correction =
		max(0, NET_SKB_PAD - MVNETA_RX_PKT_OFFSET_CORRECTION);

	pp->indir[0] = rxq_def;

	/* Get special SoC configurations */
	if (of_device_is_compatible(dn, "marvell,armada-3700-neta"))
		pp->neta_armada3700 = true;

	/* The core clock is mandatory; fall back to an unnamed clock for
	 * device trees that do not use clock-names.
	 */
	pp->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(pp->clk))
		pp->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pp->clk)) {
		err = PTR_ERR(pp->clk);
		goto err_free_phylink;
	}

	clk_prepare_enable(pp->clk);

	/* The bus clock is optional */
	pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pp->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pp->base)) {
		err = PTR_ERR(pp->base);
		goto err_clk;
	}

	/* Alloc per-cpu port structure */
	pp->ports = alloc_percpu(struct mvneta_pcpu_port);
	if (!pp->ports) {
		err = -ENOMEM;
		goto err_clk;
	}

	/* Alloc per-cpu stats */
	pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
	if (!pp->stats) {
		err = -ENOMEM;
		goto err_free_ports;
	}

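	/* MAC address precedence: an address from the device tree wins;
	 * otherwise the address already programmed into the port registers
	 * is used if valid; failing that, a random address is generated.
	 */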
	dt_mac_addr = of_get_mac_address(dn);
	if (dt_mac_addr) {
		mac_from = "device tree";
		memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
	} else {
		mvneta_get_mac_addr(pp, hw_mac_addr);
		if (is_valid_ether_addr(hw_mac_addr)) {
			mac_from = "hardware";
			memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
		} else {
			mac_from = "random";
			eth_hw_addr_random(dev);
		}
	}

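	/* The TX checksum offload limit may be capped from the device tree.
	 * Illustrative fragment (property name per the binding, value made
	 * up):
	 *
	 *	ethernet@70000 {
	 *		compatible = "marvell,armada-370-neta";
	 *		tx-csum-limit = <9800>;
	 *	};
	 */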
	if (!of_property_read_u32(dn, "tx-csum-limit", &tx_csum_limit)) {
		if (tx_csum_limit < 0 ||
		    tx_csum_limit > MVNETA_TX_CSUM_MAX_SIZE) {
			tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
			dev_info(&pdev->dev,
				 "Wrong TX csum limit in DT, set to %dB\n",
				 MVNETA_TX_CSUM_DEF_SIZE);
		}
	} else if (of_device_is_compatible(dn, "marvell,armada-370-neta")) {
		tx_csum_limit = MVNETA_TX_CSUM_DEF_SIZE;
	} else {
		tx_csum_limit = MVNETA_TX_CSUM_MAX_SIZE;
	}

	pp->tx_csum_limit = tx_csum_limit;

	pp->dram_target_info = mv_mbus_dram_info();
	/* Armada3700 requires setting default configuration of Mbus
	 * windows, however without using filled mbus_dram_target_info
	 * structure.
	 */
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	pp->dev = dev;
	SET_NETDEV_DEV(dev, &pdev->dev);

	pp->id = global_port_id++;

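	/* Optional hardware buffer management (BM): the port looks up a
	 * "buffer-manager" phandle and falls back to software buffer
	 * management when it is absent or its driver has not probed yet.
	 * Illustrative DT sketch (property names per the mvneta_bm binding,
	 * pool ids made up):
	 *
	 *	ethernet@70000 {
	 *		buffer-manager = <&bm>;
	 *		bm,pool-long = <0>;
	 *		bm,pool-short = <1>;
	 *	};
	 */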
	/* Obtain access to BM resources if enabled and already initialized */
	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
	if (bm_node && bm_node->data) {
		pp->bm_priv = bm_node->data;
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	of_node_put(bm_node);

	err = mvneta_init(&pdev->dev, pp);
	if (err < 0)
		goto err_netdev;

	err = mvneta_port_power_up(pp, phy_mode);
	if (err < 0) {
		dev_err(&pdev->dev, "can't power up port\n");
		goto err_netdev;
	}

	/* Armada3700 network controller does not support per-cpu
	 * operation, so only single NAPI should be initialized.
	 */
	if (pp->neta_armada3700) {
		netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
	} else {
		for_each_present_cpu(cpu) {
			struct mvneta_pcpu_port *port =
				per_cpu_ptr(pp->ports, cpu);

			netif_napi_add(dev, &port->napi, mvneta_poll,
				       NAPI_POLL_WEIGHT);
			port->pp = pp;
		}
	}

	dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;

	/* MTU range: 68 - 9676 */
	dev->min_mtu = ETH_MIN_MTU;
	/* 9676 == 9700 - 20 and rounding to 8 */
	dev->max_mtu = 9676;

	err = register_netdev(dev);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register\n");
		goto err_free_stats;
	}

	netdev_info(dev, "Using %s mac address %pM\n", mac_from,
		    dev->dev_addr);

	platform_set_drvdata(pdev, pp->dev);

	return 0;

	/* Error paths unwind in the reverse order of the setup above. Note
	 * that register_netdev() has not run on the paths landing at
	 * err_netdev, so only the BM pools need tearing down here.
	 */
err_netdev:
	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
	}
err_free_stats:
	free_percpu(pp->stats);
err_free_ports:
	free_percpu(pp->ports);
err_clk:
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
err_free_phylink:
	if (pp->phylink)
		phylink_destroy(pp->phylink);
err_free_irq:
	irq_dispose_mapping(dev->irq);
err_free_netdev:
	free_netdev(dev);
	return err;
}

/* Device removal routine */
static int mvneta_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct mvneta_port *pp = netdev_priv(dev);

	unregister_netdev(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	free_percpu(pp->ports);
	free_percpu(pp->stats);
	irq_dispose_mapping(dev->irq);
	phylink_destroy(pp->phylink);

	if (pp->bm_priv) {
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
				       1 << pp->id);
	}

	/* free_netdev() goes last: pp lives in the netdev's private area */
	free_netdev(dev);

	return 0;
}

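/* System sleep: the port clocks are gated during suspend, so register
 * state is lost. Resume therefore reprograms the MBus windows, the BM
 * pools, the port defaults and the SERDES configuration before
 * re-attaching and reopening the interface.
 */
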
#ifdef CONFIG_PM_SLEEP
static int mvneta_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		mvneta_stop(dev);
	rtnl_unlock();
	netif_device_detach(dev);
	clk_disable_unprepare(pp->clk_bus);
	clk_disable_unprepare(pp->clk);
	return 0;
}

static int mvneta_resume(struct device *device)
{
	struct platform_device *pdev = to_platform_device(device);
	struct net_device *dev = dev_get_drvdata(device);
	struct mvneta_port *pp = netdev_priv(dev);
	int err;

	clk_prepare_enable(pp->clk);
	if (!IS_ERR(pp->clk_bus))
		clk_prepare_enable(pp->clk_bus);
	if (pp->dram_target_info || pp->neta_armada3700)
		mvneta_conf_mbus_windows(pp, pp->dram_target_info);
	if (pp->bm_priv) {
		err = mvneta_bm_port_init(pdev, pp);
		if (err < 0) {
			dev_info(&pdev->dev, "use SW buffer management\n");
			pp->bm_priv = NULL;
		}
	}
	mvneta_defaults_set(pp);
	err = mvneta_port_power_up(pp, pp->phy_interface);
	if (err < 0) {
		dev_err(device, "can't power up port\n");
		return err;
	}

	netif_device_attach(dev);

	if (netif_running(dev)) {
		mvneta_open(dev);
		mvneta_set_rx_mode(dev);
	}

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(mvneta_pm_ops, mvneta_suspend, mvneta_resume);

static const struct of_device_id mvneta_match[] = {
	{ .compatible = "marvell,armada-370-neta" },
	{ .compatible = "marvell,armada-xp-neta" },
	{ .compatible = "marvell,armada-3700-neta" },
	{ }
};
MODULE_DEVICE_TABLE(of, mvneta_match);

static struct platform_driver mvneta_driver = {
	.probe = mvneta_probe,
	.remove = mvneta_remove,
	.driver = {
		.name = MVNETA_DRIVER_NAME,
		.of_match_table = mvneta_match,
		.pm = &mvneta_pm_ops,
	},
};

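/* CPU hotplug callbacks are registered before the platform driver: RX/TX
 * processing is per-CPU (except on Armada 3700), so queues and NAPI
 * contexts must be rebalanced when a CPU is taken down and cleaned up once
 * it is dead. The "online" state id is allocated dynamically and remembered
 * in online_hpstate; the "dead" state uses the dedicated
 * CPUHP_NET_MVNETA_DEAD slot.
 */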
static int __init mvneta_driver_init(void)
{
	int ret;

	/* "net/mvmeta:online" (sic) is kept as-is: it is the established
	 * name of this hotplug state.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "net/mvmeta:online",
				      mvneta_cpu_online,
				      mvneta_cpu_down_prepare);
	if (ret < 0)
		goto out;

	online_hpstate = ret;
	ret = cpuhp_setup_state_multi(CPUHP_NET_MVNETA_DEAD, "net/mvneta:dead",
				      NULL, mvneta_cpu_dead);
	if (ret)
		goto err_dead;

	ret = platform_driver_register(&mvneta_driver);
	if (ret)
		goto err;
	return 0;

err:
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
err_dead:
	cpuhp_remove_multi_state(online_hpstate);
out:
	return ret;
}
module_init(mvneta_driver_init);

static void __exit mvneta_driver_exit(void)
{
	platform_driver_unregister(&mvneta_driver);
	cpuhp_remove_multi_state(CPUHP_NET_MVNETA_DEAD);
	cpuhp_remove_multi_state(online_hpstate);
}
module_exit(mvneta_driver_exit);

MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_LICENSE("GPL");

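/* Module parameters: the queue counts and default RX queue are read-only
 * (S_IRUGO), so they can only be set at load time; rx_copybreak is also
 * writable by root (S_IWUSR) at runtime through
 * /sys/module/mvneta/parameters/rx_copybreak.
 */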
module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);

module_param(rxq_def, int, S_IRUGO);
module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);