1 /*
2 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
4 * Copyright (C) 2012 Marvell
6 * Rami Rosen <rosenr@marvell.com>
7 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
14 #include <linux/kernel.h>
15 #include <linux/netdevice.h>
16 #include <linux/etherdevice.h>
17 #include <linux/platform_device.h>
18 #include <linux/skbuff.h>
19 #include <linux/inetdevice.h>
20 #include <linux/mbus.h>
21 #include <linux/module.h>
22 #include <linux/interrupt.h>
23 #include <net/ip.h>
24 #include <net/ipv6.h>
25 #include <linux/io.h>
26 #include <net/tso.h>
27 #include <linux/of.h>
28 #include <linux/of_irq.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include <linux/of_address.h>
32 #include <linux/phy.h>
33 #include <linux/clk.h>
35 /* Registers */
36 #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
37 #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0)
38 #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8)
39 #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8)
40 #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
41 #define MVNETA_RXQ_NON_OCCUPIED(v) ((v) << 16)
42 #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
43 #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
44 #define MVNETA_RXQ_BUF_SIZE_SHIFT 19
45 #define MVNETA_RXQ_BUF_SIZE_MASK (0x1fff << 19)
46 #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
47 #define MVNETA_RXQ_OCCUPIED_ALL_MASK 0x3fff
48 #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
49 #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16
50 #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255
51 #define MVNETA_PORT_RX_RESET 0x1cc0
52 #define MVNETA_PORT_RX_DMA_RESET BIT(0)
53 #define MVNETA_PHY_ADDR 0x2000
54 #define MVNETA_PHY_ADDR_MASK 0x1f
55 #define MVNETA_MBUS_RETRY 0x2010
56 #define MVNETA_UNIT_INTR_CAUSE 0x2080
57 #define MVNETA_UNIT_CONTROL 0x20B0
58 #define MVNETA_PHY_POLLING_ENABLE BIT(1)
59 #define MVNETA_WIN_BASE(w) (0x2200 + ((w) << 3))
60 #define MVNETA_WIN_SIZE(w) (0x2204 + ((w) << 3))
61 #define MVNETA_WIN_REMAP(w) (0x2280 + ((w) << 2))
62 #define MVNETA_BASE_ADDR_ENABLE 0x2290
63 #define MVNETA_ACCESS_PROTECT_ENABLE 0x2294
64 #define MVNETA_PORT_CONFIG 0x2400
65 #define MVNETA_UNI_PROMISC_MODE BIT(0)
66 #define MVNETA_DEF_RXQ(q) ((q) << 1)
67 #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
68 #define MVNETA_TX_UNSET_ERR_SUM BIT(12)
69 #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
70 #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
71 #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
72 #define MVNETA_RX_CSUM_WITH_PSEUDO_HDR BIT(25)
73 #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
74 MVNETA_DEF_RXQ_ARP(q) | \
75 MVNETA_DEF_RXQ_TCP(q) | \
76 MVNETA_DEF_RXQ_UDP(q) | \
77 MVNETA_DEF_RXQ_BPDU(q) | \
78 MVNETA_TX_UNSET_ERR_SUM | \
79 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
80 #define MVNETA_PORT_CONFIG_EXTEND 0x2404
81 #define MVNETA_MAC_ADDR_LOW 0x2414
82 #define MVNETA_MAC_ADDR_HIGH 0x2418
83 #define MVNETA_SDMA_CONFIG 0x241c
84 #define MVNETA_SDMA_BRST_SIZE_16 4
85 #define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
86 #define MVNETA_RX_NO_DATA_SWAP BIT(4)
87 #define MVNETA_TX_NO_DATA_SWAP BIT(5)
88 #define MVNETA_DESC_SWAP BIT(6)
89 #define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
90 #define MVNETA_PORT_STATUS 0x2444
91 #define MVNETA_TX_IN_PRGRS BIT(1)
92 #define MVNETA_TX_FIFO_EMPTY BIT(8)
93 #define MVNETA_RX_MIN_FRAME_SIZE 0x247c
94 #define MVNETA_SERDES_CFG 0x24A0
95 #define MVNETA_SGMII_SERDES_PROTO 0x0cc7
96 #define MVNETA_QSGMII_SERDES_PROTO 0x0667
97 #define MVNETA_TYPE_PRIO 0x24bc
98 #define MVNETA_FORCE_UNI BIT(21)
99 #define MVNETA_TXQ_CMD_1 0x24e4
100 #define MVNETA_TXQ_CMD 0x2448
101 #define MVNETA_TXQ_DISABLE_SHIFT 8
102 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff
103 #define MVNETA_ACC_MODE 0x2500
104 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2))
105 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff
106 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00
107 #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
109 /* Exception Interrupt Port/Queue Cause register */
111 #define MVNETA_INTR_NEW_CAUSE 0x25a0
112 #define MVNETA_INTR_NEW_MASK 0x25a4
114 /* bits 0..7 = TXQ SENT, one bit per queue.
115 * bits 8..15 = RXQ OCCUP, one bit per queue.
116 * bits 16..23 = RXQ FREE, one bit per queue.
117 * bit 29 = OLD_REG_SUM, see old reg ?
118 * bit 30 = TX_ERR_SUM, one bit for 4 ports
119 * bit 31 = MISC_SUM, one bit for 4 ports
121 #define MVNETA_TX_INTR_MASK(nr_txqs) (((1 << nr_txqs) - 1) << 0)
122 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0)
123 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8)
124 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8)
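/* With the default rxq_number of 1 and txq_number of 8 used below, the driver
 * programs MVNETA_TX_INTR_MASK(8) == 0xff and MVNETA_RX_INTR_MASK(1) == BIT(8),
 * i.e. TX-done bits for all eight queues plus the RX bit of queue 0 only.
 */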
126 #define MVNETA_INTR_OLD_CAUSE 0x25a8
127 #define MVNETA_INTR_OLD_MASK 0x25ac
129 /* Data Path Port/Queue Cause Register */
130 #define MVNETA_INTR_MISC_CAUSE 0x25b0
131 #define MVNETA_INTR_MISC_MASK 0x25b4
133 #define MVNETA_CAUSE_PHY_STATUS_CHANGE BIT(0)
134 #define MVNETA_CAUSE_LINK_CHANGE BIT(1)
135 #define MVNETA_CAUSE_PTP BIT(4)
137 #define MVNETA_CAUSE_INTERNAL_ADDR_ERR BIT(7)
138 #define MVNETA_CAUSE_RX_OVERRUN BIT(8)
139 #define MVNETA_CAUSE_RX_CRC_ERROR BIT(9)
140 #define MVNETA_CAUSE_RX_LARGE_PKT BIT(10)
141 #define MVNETA_CAUSE_TX_UNDERUN BIT(11)
142 #define MVNETA_CAUSE_PRBS_ERR BIT(12)
143 #define MVNETA_CAUSE_PSC_SYNC_CHANGE BIT(13)
144 #define MVNETA_CAUSE_SERDES_SYNC_ERR BIT(14)
146 #define MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT 16
147 #define MVNETA_CAUSE_BMU_ALLOC_ERR_ALL_MASK (0xF << MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT)
148 #define MVNETA_CAUSE_BMU_ALLOC_ERR_MASK(pool) (1 << (MVNETA_CAUSE_BMU_ALLOC_ERR_SHIFT + (pool)))
150 #define MVNETA_CAUSE_TXQ_ERROR_SHIFT 24
151 #define MVNETA_CAUSE_TXQ_ERROR_ALL_MASK (0xFF << MVNETA_CAUSE_TXQ_ERROR_SHIFT)
152 #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
154 #define MVNETA_INTR_ENABLE 0x25b8
155 #define MVNETA_TXQ_INTR_ENABLE_ALL_MASK 0x0000ff00
156 #define MVNETA_RXQ_INTR_ENABLE_ALL_MASK 0x000000ff
158 #define MVNETA_RXQ_CMD 0x2680
159 #define MVNETA_RXQ_DISABLE_SHIFT 8
160 #define MVNETA_RXQ_ENABLE_MASK 0x000000ff
161 #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
162 #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
163 #define MVNETA_GMAC_CTRL_0 0x2c00
164 #define MVNETA_GMAC_MAX_RX_SIZE_SHIFT 2
165 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
166 #define MVNETA_GMAC0_PORT_ENABLE BIT(0)
167 #define MVNETA_GMAC_CTRL_2 0x2c08
168 #define MVNETA_GMAC2_PCS_ENABLE BIT(3)
169 #define MVNETA_GMAC2_PORT_RGMII BIT(4)
170 #define MVNETA_GMAC2_PORT_RESET BIT(6)
171 #define MVNETA_GMAC_STATUS 0x2c10
172 #define MVNETA_GMAC_LINK_UP BIT(0)
173 #define MVNETA_GMAC_SPEED_1000 BIT(1)
174 #define MVNETA_GMAC_SPEED_100 BIT(2)
175 #define MVNETA_GMAC_FULL_DUPLEX BIT(3)
176 #define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE BIT(4)
177 #define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE BIT(5)
178 #define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE BIT(6)
179 #define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE BIT(7)
180 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c
181 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0)
182 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
183 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
184 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
185 #define MVNETA_GMAC_AN_SPEED_EN BIT(7)
186 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
187 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
188 #define MVNETA_MIB_COUNTERS_BASE 0x3080
189 #define MVNETA_MIB_LATE_COLLISION 0x7c
190 #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
191 #define MVNETA_DA_FILT_OTH_MCAST 0x3500
192 #define MVNETA_DA_FILT_UCAST_BASE 0x3600
193 #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
194 #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
195 #define MVNETA_TXQ_SENT_THRESH_ALL_MASK 0x3fff0000
196 #define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
197 #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
198 #define MVNETA_TXQ_DEC_SENT_SHIFT 16
199 #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
200 #define MVNETA_TXQ_SENT_DESC_SHIFT 16
201 #define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
202 #define MVNETA_PORT_TX_RESET 0x3cf0
203 #define MVNETA_PORT_TX_DMA_RESET BIT(0)
204 #define MVNETA_TX_MTU 0x3e0c
205 #define MVNETA_TX_TOKEN_SIZE 0x3e14
206 #define MVNETA_TX_TOKEN_SIZE_MAX 0xffffffff
207 #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
208 #define MVNETA_TXQ_TOKEN_SIZE_MAX 0x7fffffff
210 #define MVNETA_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
212 /* Descriptor ring Macros */
213 #define MVNETA_QUEUE_NEXT_DESC(q, index) \
214 (((index) < (q)->last_desc) ? ((index) + 1) : 0)
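/* E.g. with a 128-entry ring, last_desc is 127 and index 127 wraps back to 0,
 * giving a simple circular walk over the descriptor array.
 */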
216 /* Various constants */
218 /* Coalescing */
219 #define MVNETA_TXDONE_COAL_PKTS 1
220 #define MVNETA_RX_COAL_PKTS 32
221 #define MVNETA_RX_COAL_USEC 100
223 /* The two-byte Marvell header. It either contains a special value used
224 * by Marvell switches when a specific hardware mode is enabled (not
225 * supported by this driver) or is filled with zeroes automatically on
226 * the RX side. Because those two bytes sit in front of the Ethernet
227 * header, they automatically align the IP header on a 4-byte
228 * boundary: the hardware skips those two bytes on its
229 * own.
231 #define MVNETA_MH_SIZE 2
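/* With the 2-byte Marvell header in place, the 14-byte Ethernet header starts
 * at offset 2, so the IP header begins at offset 16, i.e. on a 4-byte
 * boundary, without any software realignment.
 */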
233 #define MVNETA_VLAN_TAG_LEN 4
235 #define MVNETA_CPU_D_CACHE_LINE_SIZE 32
236 #define MVNETA_TX_CSUM_MAX_SIZE 9800
237 #define MVNETA_ACC_MODE_EXT 1
239 /* Timeout constants */
240 #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000
241 #define MVNETA_RX_DISABLE_TIMEOUT_MSEC 1000
242 #define MVNETA_TX_FIFO_EMPTY_TIMEOUT 10000
244 #define MVNETA_TX_MTU_MAX 0x3ffff
246 /* TSO header size */
247 #define TSO_HEADER_SIZE 128
249 /* Max number of Rx descriptors */
250 #define MVNETA_MAX_RXD 128
252 /* Max number of Tx descriptors */
253 #define MVNETA_MAX_TXD 532
255 /* Max number of allowed TCP segments for software TSO */
256 #define MVNETA_MAX_TSO_SEGS 100
258 #define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
260 /* descriptor aligned size */
261 #define MVNETA_DESC_ALIGNED_SIZE 32
263 #define MVNETA_RX_PKT_SIZE(mtu) \
264 ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \
265 ETH_HLEN + ETH_FCS_LEN, \
266 MVNETA_CPU_D_CACHE_LINE_SIZE)
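/* For example, an MTU of 1500 gives 1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) +
 * 4 (FCS) = 1524 bytes, which ALIGN() rounds up to a 1536-byte RX packet size.
 */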
268 #define IS_TSO_HEADER(txq, addr) \
269 ((addr >= txq->tso_hdrs_phys) && \
270 (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
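/* True when <addr> points into the queue's pre-allocated TSO header region;
 * such buffers must not be DMA-unmapped on completion.
 */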
272 #define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
274 struct mvneta_pcpu_stats {
275 struct u64_stats_sync syncp;
276 u64 rx_packets;
277 u64 rx_bytes;
278 u64 tx_packets;
279 u64 tx_bytes;
282 struct mvneta_port {
283 int pkt_size;
284 unsigned int frag_size;
285 void __iomem *base;
286 struct mvneta_rx_queue *rxqs;
287 struct mvneta_tx_queue *txqs;
288 struct net_device *dev;
290 u32 cause_rx_tx;
291 struct napi_struct napi;
293 /* Core clock */
294 struct clk *clk;
295 u8 mcast_count[256];
296 u16 tx_ring_size;
297 u16 rx_ring_size;
298 struct mvneta_pcpu_stats *stats;
300 struct mii_bus *mii_bus;
301 struct phy_device *phy_dev;
302 phy_interface_t phy_interface;
303 struct device_node *phy_node;
304 unsigned int link;
305 unsigned int duplex;
306 unsigned int speed;
307 unsigned int tx_csum_limit;
310 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
311 * layout of the transmit and receive DMA descriptors; their layout
312 * is therefore dictated by the hardware design.
315 #define MVNETA_TX_L3_OFF_SHIFT 0
316 #define MVNETA_TX_IP_HLEN_SHIFT 8
317 #define MVNETA_TX_L4_UDP BIT(16)
318 #define MVNETA_TX_L3_IP6 BIT(17)
319 #define MVNETA_TXD_IP_CSUM BIT(18)
320 #define MVNETA_TXD_Z_PAD BIT(19)
321 #define MVNETA_TXD_L_DESC BIT(20)
322 #define MVNETA_TXD_F_DESC BIT(21)
323 #define MVNETA_TXD_FLZ_DESC (MVNETA_TXD_Z_PAD | \
324 MVNETA_TXD_L_DESC | \
325 MVNETA_TXD_F_DESC)
326 #define MVNETA_TX_L4_CSUM_FULL BIT(30)
327 #define MVNETA_TX_L4_CSUM_NOT BIT(31)
329 #define MVNETA_RXD_ERR_CRC 0x0
330 #define MVNETA_RXD_ERR_SUMMARY BIT(16)
331 #define MVNETA_RXD_ERR_OVERRUN BIT(17)
332 #define MVNETA_RXD_ERR_LEN BIT(18)
333 #define MVNETA_RXD_ERR_RESOURCE (BIT(17) | BIT(18))
334 #define MVNETA_RXD_ERR_CODE_MASK (BIT(17) | BIT(18))
335 #define MVNETA_RXD_L3_IP4 BIT(25)
336 #define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
337 #define MVNETA_RXD_L4_CSUM_OK BIT(30)
339 #if defined(__LITTLE_ENDIAN)
340 struct mvneta_tx_desc {
341 u32 command; /* Options used by HW for packet transmitting.*/
342 u16 reserverd1; /* csum_l4 (for future use) */
343 u16 data_size; /* Data size of transmitted packet in bytes */
344 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
345 u32 reserved2; /* hw_cmd - (for future use, PMT) */
346 u32 reserved3[4]; /* Reserved - (for future use) */
349 struct mvneta_rx_desc {
350 u32 status; /* Info about received packet */
351 u16 reserved1; /* pnc_info - (for future use, PnC) */
352 u16 data_size; /* Size of received packet in bytes */
354 u32 buf_phys_addr; /* Physical address of the buffer */
355 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
357 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
358 u16 reserved3; /* prefetch_cmd, for future use */
359 u16 reserved4; /* csum_l4 - (for future use, PnC) */
361 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
362 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
364 #else
365 struct mvneta_tx_desc {
366 u16 data_size; /* Data size of transmitted packet in bytes */
367 u16 reserverd1; /* csum_l4 (for future use) */
368 u32 command; /* Options used by HW for packet transmitting.*/
369 u32 reserved2; /* hw_cmd - (for future use, PMT) */
370 u32 buf_phys_addr; /* Physical addr of transmitted buffer */
371 u32 reserved3[4]; /* Reserved - (for future use) */
374 struct mvneta_rx_desc {
375 u16 data_size; /* Size of received packet in bytes */
376 u16 reserved1; /* pnc_info - (for future use, PnC) */
377 u32 status; /* Info about received packet */
379 u32 reserved2; /* pnc_flow_id (for future use, PnC) */
380 u32 buf_phys_addr; /* Physical address of the buffer */
382 u16 reserved4; /* csum_l4 - (for future use, PnC) */
383 u16 reserved3; /* prefetch_cmd, for future use */
384 u32 buf_cookie; /* cookie for access to RX buffer in rx path */
386 u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
387 u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
389 #endif
391 struct mvneta_tx_queue {
392 /* Number of this TX queue, in the range 0-7 */
393 u8 id;
395 /* Number of TX DMA descriptors in the descriptor ring */
396 int size;
398 /* Number of currently used TX DMA descriptor in the
399 * descriptor ring
401 int count;
402 int tx_stop_threshold;
403 int tx_wake_threshold;
405 /* Array of transmitted skb */
406 struct sk_buff **tx_skb;
408 /* Index of last TX DMA descriptor that was inserted */
409 int txq_put_index;
411 /* Index of the TX DMA descriptor to be cleaned up */
412 int txq_get_index;
414 u32 done_pkts_coal;
416 /* Virtual address of the TX DMA descriptors array */
417 struct mvneta_tx_desc *descs;
419 /* DMA address of the TX DMA descriptors array */
420 dma_addr_t descs_phys;
422 /* Index of the last TX DMA descriptor */
423 int last_desc;
425 /* Index of the next TX DMA descriptor to process */
426 int next_desc_to_proc;
428 /* DMA buffers for TSO headers */
429 char *tso_hdrs;
431 /* DMA address of TSO headers */
432 dma_addr_t tso_hdrs_phys;
435 struct mvneta_rx_queue {
436 /* rx queue number, in the range 0-7 */
437 u8 id;
439 /* num of rx descriptors in the rx descriptor ring */
440 int size;
442 /* counter of times when mvneta_refill() failed */
443 int missed;
445 u32 pkts_coal;
446 u32 time_coal;
448 /* Virtual address of the RX DMA descriptors array */
449 struct mvneta_rx_desc *descs;
451 /* DMA address of the RX DMA descriptors array */
452 dma_addr_t descs_phys;
454 /* Index of the last RX DMA descriptor */
455 int last_desc;
457 /* Index of the next RX DMA descriptor to process */
458 int next_desc_to_proc;
461 /* The hardware supports eight (8) rx queues, but we are only allowing
462 * the first one to be used. Therefore, let's just allocate one queue.
464 static int rxq_number = 1;
465 static int txq_number = 8;
467 static int rxq_def;
469 static int rx_copybreak __read_mostly = 256;
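/* Frames of rx_copybreak bytes or less are copied into a freshly allocated
 * skb in mvneta_rx() instead of unmapping and handing off the DMA buffer.
 */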
471 #define MVNETA_DRIVER_NAME "mvneta"
472 #define MVNETA_DRIVER_VERSION "1.0"
474 /* Utility/helper methods */
476 /* Write helper method */
477 static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
479 writel(data, pp->base + offset);
482 /* Read helper method */
483 static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
485 return readl(pp->base + offset);
488 /* Increment txq get counter */
489 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
491 txq->txq_get_index++;
492 if (txq->txq_get_index == txq->size)
493 txq->txq_get_index = 0;
496 /* Increment txq put counter */
497 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
499 txq->txq_put_index++;
500 if (txq->txq_put_index == txq->size)
501 txq->txq_put_index = 0;
505 /* Clear all MIB counters */
506 static void mvneta_mib_counters_clear(struct mvneta_port *pp)
508 int i;
509 u32 dummy;
511 /* Perform dummy reads from MIB counters */
512 for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
513 dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
516 /* Get System Network Statistics */
517 struct rtnl_link_stats64 *mvneta_get_stats64(struct net_device *dev,
518 struct rtnl_link_stats64 *stats)
520 struct mvneta_port *pp = netdev_priv(dev);
521 unsigned int start;
522 int cpu;
524 for_each_possible_cpu(cpu) {
525 struct mvneta_pcpu_stats *cpu_stats;
526 u64 rx_packets;
527 u64 rx_bytes;
528 u64 tx_packets;
529 u64 tx_bytes;
531 cpu_stats = per_cpu_ptr(pp->stats, cpu);
532 do {
533 start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
534 rx_packets = cpu_stats->rx_packets;
535 rx_bytes = cpu_stats->rx_bytes;
536 tx_packets = cpu_stats->tx_packets;
537 tx_bytes = cpu_stats->tx_bytes;
538 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
540 stats->rx_packets += rx_packets;
541 stats->rx_bytes += rx_bytes;
542 stats->tx_packets += tx_packets;
543 stats->tx_bytes += tx_bytes;
546 stats->rx_errors = dev->stats.rx_errors;
547 stats->rx_dropped = dev->stats.rx_dropped;
549 stats->tx_dropped = dev->stats.tx_dropped;
551 return stats;
554 /* Rx descriptors helper methods */
556 /* Check whether the RX descriptor carrying this status is both the first
557 * and the last descriptor of the RX packet. Each RX packet is currently
558 * received through a single RX descriptor, so a descriptor that does not
559 * have both its first and last bits set indicates an error.
561 static int mvneta_rxq_desc_is_first_last(u32 status)
563 return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
564 MVNETA_RXD_FIRST_LAST_DESC;
567 /* Add number of descriptors ready to receive new packets */
568 static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
569 struct mvneta_rx_queue *rxq,
570 int ndescs)
572 /* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
573 * be added at once
575 while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
576 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
577 (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
578 MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
579 ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
582 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
583 (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
586 /* Get number of RX descriptors occupied by received packets */
587 static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
588 struct mvneta_rx_queue *rxq)
590 u32 val;
592 val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
593 return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
596 /* Update the number of processed/refilled RX descriptors; called on return
597 * from the rx path or from mvneta_rxq_drop_pkts().
599 static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
600 struct mvneta_rx_queue *rxq,
601 int rx_done, int rx_filled)
603 u32 val;
605 if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
606 val = rx_done |
607 (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
608 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
609 return;
612 /* Only 255 descriptors can be added at once */
613 while ((rx_done > 0) || (rx_filled > 0)) {
614 if (rx_done <= 0xff) {
615 val = rx_done;
616 rx_done = 0;
617 } else {
618 val = 0xff;
619 rx_done -= 0xff;
621 if (rx_filled <= 0xff) {
622 val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
623 rx_filled = 0;
624 } else {
625 val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
626 rx_filled -= 0xff;
628 mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
632 /* Get pointer to next RX descriptor to be processed by SW */
633 static struct mvneta_rx_desc *
634 mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
636 int rx_desc = rxq->next_desc_to_proc;
638 rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
639 prefetch(rxq->descs + rxq->next_desc_to_proc);
640 return rxq->descs + rx_desc;
643 /* Change maximum receive size of the port. */
644 static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size)
646 u32 val;
648 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
649 val &= ~MVNETA_GMAC_MAX_RX_SIZE_MASK;
650 val |= ((max_rx_size - MVNETA_MH_SIZE) / 2) <<
651 MVNETA_GMAC_MAX_RX_SIZE_SHIFT;
652 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
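/* The MAX_RX_SIZE field appears to be programmed in units of 2 bytes: e.g. a
 * 1536-byte limit, minus the 2-byte Marvell header, is written as 767.
 */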
656 /* Set rx queue offset */
657 static void mvneta_rxq_offset_set(struct mvneta_port *pp,
658 struct mvneta_rx_queue *rxq,
659 int offset)
661 u32 val;
663 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
664 val &= ~MVNETA_RXQ_PKT_OFFSET_ALL_MASK;
666 /* Offset is in units of 8 bytes */
667 val |= MVNETA_RXQ_PKT_OFFSET_MASK(offset >> 3);
668 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
672 /* Tx descriptors helper methods */
674 /* Update HW with number of TX descriptors to be sent */
675 static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
676 struct mvneta_tx_queue *txq,
677 int pend_desc)
679 u32 val;
681 /* Only 255 descriptors can be added at once; assume the caller
682 * processes TX descriptors in quanta of less than 256.
684 val = pend_desc;
685 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
688 /* Get pointer to next TX descriptor to be processed (send) by HW */
689 static struct mvneta_tx_desc *
690 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
692 int tx_desc = txq->next_desc_to_proc;
694 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
695 return txq->descs + tx_desc;
698 /* Release the last allocated TX descriptor. Useful to handle DMA
699 * mapping failures in the TX path.
701 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
703 if (txq->next_desc_to_proc == 0)
704 txq->next_desc_to_proc = txq->last_desc - 1;
705 else
706 txq->next_desc_to_proc--;
709 /* Set rxq buf size */
710 static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
711 struct mvneta_rx_queue *rxq,
712 int buf_size)
714 u32 val;
716 val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));
718 val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
719 val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);
721 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
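/* Like the packet offset, the buffer size field is in units of 8 bytes:
 * a 1536-byte RX buffer is programmed as 192 in MVNETA_RXQ_SIZE_REG.
 */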
724 /* Disable buffer management (BM) */
725 static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
726 struct mvneta_rx_queue *rxq)
728 u32 val;
730 val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
731 val &= ~MVNETA_RXQ_HW_BUF_ALLOC;
732 mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
735 /* Start the Ethernet port RX and TX activity */
736 static void mvneta_port_up(struct mvneta_port *pp)
738 int queue;
739 u32 q_map;
741 /* Enable all initialized TXs. */
742 mvneta_mib_counters_clear(pp);
743 q_map = 0;
744 for (queue = 0; queue < txq_number; queue++) {
745 struct mvneta_tx_queue *txq = &pp->txqs[queue];
746 if (txq->descs != NULL)
747 q_map |= (1 << queue);
749 mvreg_write(pp, MVNETA_TXQ_CMD, q_map);
751 /* Enable all initialized RXQs. */
752 q_map = 0;
753 for (queue = 0; queue < rxq_number; queue++) {
754 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
755 if (rxq->descs != NULL)
756 q_map |= (1 << queue);
759 mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
762 /* Stop the Ethernet port activity */
763 static void mvneta_port_down(struct mvneta_port *pp)
765 u32 val;
766 int count;
768 /* Stop Rx port activity. Check port Rx activity. */
769 val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;
771 /* Issue stop command for active channels only */
772 if (val != 0)
773 mvreg_write(pp, MVNETA_RXQ_CMD,
774 val << MVNETA_RXQ_DISABLE_SHIFT);
776 /* Wait for all Rx activity to terminate. */
777 count = 0;
778 do {
779 if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
780 netdev_warn(pp->dev,
781 "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n",
782 val);
783 break;
785 mdelay(1);
787 val = mvreg_read(pp, MVNETA_RXQ_CMD);
788 } while (val & 0xff);
790 /* Stop Tx port activity. Check port Tx activity. Issue stop
791 * command for active channels only
793 val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;
795 if (val != 0)
796 mvreg_write(pp, MVNETA_TXQ_CMD,
797 (val << MVNETA_TXQ_DISABLE_SHIFT));
799 /* Wait for all Tx activity to terminate. */
800 count = 0;
801 do {
802 if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
803 netdev_warn(pp->dev,
804 "TIMEOUT for TX stopped status=0x%08x\n",
805 val);
806 break;
808 mdelay(1);
810 /* Check TX Command reg that all Txqs are stopped */
811 val = mvreg_read(pp, MVNETA_TXQ_CMD);
813 } while (val & 0xff);
815 /* Double check to verify that TX FIFO is empty */
816 count = 0;
817 do {
818 if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
819 netdev_warn(pp->dev,
820 "TX FIFO empty timeout status=0x08%x\n",
821 val);
822 break;
824 mdelay(1);
826 val = mvreg_read(pp, MVNETA_PORT_STATUS);
827 } while (!(val & MVNETA_TX_FIFO_EMPTY) &&
828 (val & MVNETA_TX_IN_PRGRS));
830 udelay(200);
833 /* Enable the port by setting the port enable bit of the MAC control register */
834 static void mvneta_port_enable(struct mvneta_port *pp)
836 u32 val;
838 /* Enable port */
839 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
840 val |= MVNETA_GMAC0_PORT_ENABLE;
841 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
844 /* Disable the port and wait for about 200 usec before returning */
845 static void mvneta_port_disable(struct mvneta_port *pp)
847 u32 val;
849 /* Reset the Enable bit in the Serial Control Register */
850 val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
851 val &= ~MVNETA_GMAC0_PORT_ENABLE;
852 mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
854 udelay(200);
857 /* Multicast tables methods */
859 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */
860 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
862 int offset;
863 u32 val;
865 if (queue == -1) {
866 val = 0;
867 } else {
868 val = 0x1 | (queue << 1);
869 val |= (val << 24) | (val << 16) | (val << 8);
872 for (offset = 0; offset <= 0xc; offset += 4)
873 mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
876 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
877 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
879 int offset;
880 u32 val;
882 if (queue == -1) {
883 val = 0;
884 } else {
885 val = 0x1 | (queue << 1);
886 val |= (val << 24) | (val << 16) | (val << 8);
889 for (offset = 0; offset <= 0xfc; offset += 4)
890 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
894 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
895 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
897 int offset;
898 u32 val;
900 if (queue == -1) {
901 memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
902 val = 0;
903 } else {
904 memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
905 val = 0x1 | (queue << 1);
906 val |= (val << 24) | (val << 16) | (val << 8);
909 for (offset = 0; offset <= 0xfc; offset += 4)
910 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
913 /* This method sets defaults for the NETA port:
914 * Clears interrupt Cause and Mask registers.
915 * Clears all MAC tables.
916 * Sets defaults to all registers.
917 * Resets RX and TX descriptor rings.
918 * Resets PHY.
919 * This method can be called after mvneta_port_down() to return the port
920 * settings to defaults.
922 static void mvneta_defaults_set(struct mvneta_port *pp)
924 int cpu;
925 int queue;
926 u32 val;
928 /* Clear all Cause registers */
929 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
930 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
931 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
933 /* Mask all interrupts */
934 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
935 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
936 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
937 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
939 /* Enable MBUS Retry bit16 */
940 mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);
942 /* Set CPU queue access map - all CPUs have access to all RX
943 * queues and to all TX queues
945 for_each_present_cpu(cpu)
946 mvreg_write(pp, MVNETA_CPU_MAP(cpu),
947 (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
948 MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
950 /* Reset RX and TX DMAs */
951 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
952 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
954 /* Disable Legacy WRR, Disable EJP, Release from reset */
955 mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
956 for (queue = 0; queue < txq_number; queue++) {
957 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
958 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
961 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
962 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
964 /* Set Port Acceleration Mode */
965 val = MVNETA_ACC_MODE_EXT;
966 mvreg_write(pp, MVNETA_ACC_MODE, val);
968 /* Update the portCfg register value according to all RxQueue types */
969 val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
970 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
972 val = 0;
973 mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
974 mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);
976 /* Build PORT_SDMA_CONFIG_REG */
977 val = 0;
979 /* Default burst size */
980 val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
981 val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
982 val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
984 #if defined(__BIG_ENDIAN)
985 val |= MVNETA_DESC_SWAP;
986 #endif
988 /* Assign port SDMA configuration */
989 mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
991 /* Disable PHY polling in hardware, since we're using the
992 * kernel phylib to do this.
994 val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
995 val &= ~MVNETA_PHY_POLLING_ENABLE;
996 mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
998 mvneta_set_ucast_table(pp, -1);
999 mvneta_set_special_mcast_table(pp, -1);
1000 mvneta_set_other_mcast_table(pp, -1);
1002 /* Set port interrupt enable register - default enable all */
1003 mvreg_write(pp, MVNETA_INTR_ENABLE,
1004 (MVNETA_RXQ_INTR_ENABLE_ALL_MASK
1005 | MVNETA_TXQ_INTR_ENABLE_ALL_MASK));
1008 /* Set max sizes for tx queues */
1009 static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size)
1012 u32 val, size, mtu;
1013 int queue;
1015 mtu = max_tx_size * 8;
1016 if (mtu > MVNETA_TX_MTU_MAX)
1017 mtu = MVNETA_TX_MTU_MAX;
1019 /* Set MTU */
1020 val = mvreg_read(pp, MVNETA_TX_MTU);
1021 val &= ~MVNETA_TX_MTU_MAX;
1022 val |= mtu;
1023 mvreg_write(pp, MVNETA_TX_MTU, val);
1025 /* The TX token size and every TXQ's token size must be larger than the MTU */
1026 val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE);
1028 size = val & MVNETA_TX_TOKEN_SIZE_MAX;
1029 if (size < mtu) {
1030 size = mtu;
1031 val &= ~MVNETA_TX_TOKEN_SIZE_MAX;
1032 val |= size;
1033 mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val);
1035 for (queue = 0; queue < txq_number; queue++) {
1036 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue));
1038 size = val & MVNETA_TXQ_TOKEN_SIZE_MAX;
1039 if (size < mtu) {
1040 size = mtu;
1041 val &= ~MVNETA_TXQ_TOKEN_SIZE_MAX;
1042 val |= size;
1043 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val);
1048 /* Set unicast address */
1049 static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
1050 int queue)
1052 unsigned int unicast_reg;
1053 unsigned int tbl_offset;
1054 unsigned int reg_offset;
1056 /* Locate the Unicast table entry */
1057 last_nibble = (0xf & last_nibble);
1059 /* offset from unicast tbl base */
1060 tbl_offset = (last_nibble / 4) * 4;
1062 /* offset within the above reg */
1063 reg_offset = last_nibble % 4;
1065 unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));
1067 if (queue == -1) {
1068 /* Clear accepts frame bit at specified unicast DA tbl entry */
1069 unicast_reg &= ~(0xff << (8 * reg_offset));
1070 } else {
1071 unicast_reg &= ~(0xff << (8 * reg_offset));
1072 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1075 mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
1078 /* Set mac address */
1079 static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
1080 int queue)
1082 unsigned int mac_h;
1083 unsigned int mac_l;
1085 if (queue != -1) {
1086 mac_l = (addr[4] << 8) | (addr[5]);
1087 mac_h = (addr[0] << 24) | (addr[1] << 16) |
1088 (addr[2] << 8) | (addr[3] << 0);
1090 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
1091 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
1094 /* Accept frames of this address */
1095 mvneta_set_ucast_addr(pp, addr[5], queue);
1098 /* Set the number of packets that must be received before the HW
1099 * generates an RX interrupt.
1101 static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp,
1102 struct mvneta_rx_queue *rxq, u32 value)
1104 mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id),
1105 value | MVNETA_RXQ_NON_OCCUPIED(0));
1106 rxq->pkts_coal = value;
1109 /* Set the time delay in usec before RX interrupt will be generated by
1110 * HW.
1112 static void mvneta_rx_time_coal_set(struct mvneta_port *pp,
1113 struct mvneta_rx_queue *rxq, u32 value)
1115 u32 val;
1116 unsigned long clk_rate;
1118 clk_rate = clk_get_rate(pp->clk);
1119 val = (clk_rate / 1000000) * value;
1121 mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val);
1122 rxq->time_coal = value;
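/* The register holds core-clock cycles rather than microseconds; for example,
 * assuming a 250 MHz core clock, a 100 usec setting programs 25000 cycles.
 */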
1125 /* Set threshold for TX_DONE pkts coalescing */
1126 static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp,
1127 struct mvneta_tx_queue *txq, u32 value)
1129 u32 val;
1131 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1133 val &= ~MVNETA_TXQ_SENT_THRESH_ALL_MASK;
1134 val |= MVNETA_TXQ_SENT_THRESH_MASK(value);
1136 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1138 txq->done_pkts_coal = value;
1141 /* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
1142 static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
1143 u32 phys_addr, u32 cookie)
1145 rx_desc->buf_cookie = cookie;
1146 rx_desc->buf_phys_addr = phys_addr;
1149 /* Decrement sent descriptors counter */
1150 static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
1151 struct mvneta_tx_queue *txq,
1152 int sent_desc)
1154 u32 val;
1156 /* Only 255 TX descriptors can be updated at once */
1157 while (sent_desc > 0xff) {
1158 val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
1159 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1160 sent_desc = sent_desc - 0xff;
1163 val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
1164 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1167 /* Get number of TX descriptors already sent by HW */
1168 static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
1169 struct mvneta_tx_queue *txq)
1171 u32 val;
1172 int sent_desc;
1174 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1175 sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
1176 MVNETA_TXQ_SENT_DESC_SHIFT;
1178 return sent_desc;
1181 /* Get number of sent descriptors and decrement counter.
1182 * The number of sent descriptors is returned.
1184 static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp,
1185 struct mvneta_tx_queue *txq)
1187 int sent_desc;
1189 /* Get number of sent descriptors */
1190 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1192 /* Decrement sent descriptors counter */
1193 if (sent_desc)
1194 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1196 return sent_desc;
1199 /* Set TXQ descriptors fields relevant for CSUM calculation */
1200 static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1201 int ip_hdr_len, int l4_proto)
1203 u32 command;
1205 /* Fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
1206 * G_L4_chk, L4_type; required only for checksum
1207 * calculation
1209 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1210 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1212 if (l3_proto == htons(ETH_P_IP))
1213 command |= MVNETA_TXD_IP_CSUM;
1214 else
1215 command |= MVNETA_TX_L3_IP6;
1217 if (l4_proto == IPPROTO_TCP)
1218 command |= MVNETA_TX_L4_CSUM_FULL;
1219 else if (l4_proto == IPPROTO_UDP)
1220 command |= MVNETA_TX_L4_UDP | MVNETA_TX_L4_CSUM_FULL;
1221 else
1222 command |= MVNETA_TX_L4_CSUM_NOT;
1224 return command;
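/* As a worked example (assuming a plain IPv4/TCP frame with no VLAN tag), the
 * caller passes l3_offs = 14 and ip_hdr_len = 5 (ihl, in 32-bit words), so the
 * command becomes 14 | (5 << 8) | MVNETA_TXD_IP_CSUM | MVNETA_TX_L4_CSUM_FULL.
 */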
1228 /* Display more error info */
1229 static void mvneta_rx_error(struct mvneta_port *pp,
1230 struct mvneta_rx_desc *rx_desc)
1232 u32 status = rx_desc->status;
1234 if (!mvneta_rxq_desc_is_first_last(status)) {
1235 netdev_err(pp->dev,
1236 "bad rx status %08x (buffer oversize), size=%d\n",
1237 status, rx_desc->data_size);
1238 return;
1241 switch (status & MVNETA_RXD_ERR_CODE_MASK) {
1242 case MVNETA_RXD_ERR_CRC:
1243 netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
1244 status, rx_desc->data_size);
1245 break;
1246 case MVNETA_RXD_ERR_OVERRUN:
1247 netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
1248 status, rx_desc->data_size);
1249 break;
1250 case MVNETA_RXD_ERR_LEN:
1251 netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
1252 status, rx_desc->data_size);
1253 break;
1254 case MVNETA_RXD_ERR_RESOURCE:
1255 netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
1256 status, rx_desc->data_size);
1257 break;
1261 /* Handle RX checksum offload based on the descriptor's status */
1262 static void mvneta_rx_csum(struct mvneta_port *pp, u32 status,
1263 struct sk_buff *skb)
1265 if ((status & MVNETA_RXD_L3_IP4) &&
1266 (status & MVNETA_RXD_L4_CSUM_OK)) {
1267 skb->csum = 0;
1268 skb->ip_summed = CHECKSUM_UNNECESSARY;
1269 return;
1272 skb->ip_summed = CHECKSUM_NONE;
1275 /* Return a tx queue pointer (find last set bit) according to <cause> returned
1276 * from the tx_done reg. <cause> must not be zero. The return value is always a
1277 * valid queue: the highest-numbered queue whose bit is set in <cause>.
1279 static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
1280 u32 cause)
1282 int queue = fls(cause) - 1;
1284 return &pp->txqs[queue];
1287 /* Free tx queue skbuffs */
1288 static void mvneta_txq_bufs_free(struct mvneta_port *pp,
1289 struct mvneta_tx_queue *txq, int num)
1291 int i;
1293 for (i = 0; i < num; i++) {
1294 struct mvneta_tx_desc *tx_desc = txq->descs +
1295 txq->txq_get_index;
1296 struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
1298 mvneta_txq_inc_get(txq);
1300 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1301 dma_unmap_single(pp->dev->dev.parent,
1302 tx_desc->buf_phys_addr,
1303 tx_desc->data_size, DMA_TO_DEVICE);
1304 if (!skb)
1305 continue;
1306 dev_kfree_skb_any(skb);
1310 /* Handle end of transmission */
1311 static void mvneta_txq_done(struct mvneta_port *pp,
1312 struct mvneta_tx_queue *txq)
1314 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1315 int tx_done;
1317 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1318 if (!tx_done)
1319 return;
1321 mvneta_txq_bufs_free(pp, txq, tx_done);
1323 txq->count -= tx_done;
1325 if (netif_tx_queue_stopped(nq)) {
1326 if (txq->count <= txq->tx_wake_threshold)
1327 netif_tx_wake_queue(nq);
1331 static void *mvneta_frag_alloc(const struct mvneta_port *pp)
1333 if (likely(pp->frag_size <= PAGE_SIZE))
1334 return netdev_alloc_frag(pp->frag_size);
1335 else
1336 return kmalloc(pp->frag_size, GFP_ATOMIC);
1339 static void mvneta_frag_free(const struct mvneta_port *pp, void *data)
1341 if (likely(pp->frag_size <= PAGE_SIZE))
1342 put_page(virt_to_head_page(data));
1343 else
1344 kfree(data);
1347 /* Refill processing */
1348 static int mvneta_rx_refill(struct mvneta_port *pp,
1349 struct mvneta_rx_desc *rx_desc)
1352 dma_addr_t phys_addr;
1353 void *data;
1355 data = mvneta_frag_alloc(pp);
1356 if (!data)
1357 return -ENOMEM;
1359 phys_addr = dma_map_single(pp->dev->dev.parent, data,
1360 MVNETA_RX_BUF_SIZE(pp->pkt_size),
1361 DMA_FROM_DEVICE);
1362 if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
1363 mvneta_frag_free(pp, data);
1364 return -ENOMEM;
1367 mvneta_rx_desc_fill(rx_desc, phys_addr, (u32)data);
1368 return 0;
1371 /* Handle tx checksum */
1372 static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
1374 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1375 int ip_hdr_len = 0;
1376 u8 l4_proto;
1378 if (skb->protocol == htons(ETH_P_IP)) {
1379 struct iphdr *ip4h = ip_hdr(skb);
1381 /* Calculate IPv4 checksum and L4 checksum */
1382 ip_hdr_len = ip4h->ihl;
1383 l4_proto = ip4h->protocol;
1384 } else if (skb->protocol == htons(ETH_P_IPV6)) {
1385 struct ipv6hdr *ip6h = ipv6_hdr(skb);
1387 /* Read l4_protocol from one of IPv6 extra headers */
1388 if (skb_network_header_len(skb) > 0)
1389 ip_hdr_len = (skb_network_header_len(skb) >> 2);
1390 l4_proto = ip6h->nexthdr;
1391 } else
1392 return MVNETA_TX_L4_CSUM_NOT;
1394 return mvneta_txq_desc_csum(skb_network_offset(skb),
1395 skb->protocol, ip_hdr_len, l4_proto);
1398 return MVNETA_TX_L4_CSUM_NOT;
1401 /* Returns rx queue pointer (find last set bit) according to causeRxTx
1402 * value
1404 static struct mvneta_rx_queue *mvneta_rx_policy(struct mvneta_port *pp,
1405 u32 cause)
1407 int queue = fls(cause >> 8) - 1;
1409 return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue];
1412 /* Drop packets received by the RXQ and free buffers */
1413 static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1414 struct mvneta_rx_queue *rxq)
1416 int rx_done, i;
1418 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1419 for (i = 0; i < rxq->size; i++) {
1420 struct mvneta_rx_desc *rx_desc = rxq->descs + i;
1421 void *data = (void *)rx_desc->buf_cookie;
1423 mvneta_frag_free(pp, data);
1424 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1425 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1428 if (rx_done)
1429 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1432 /* Main rx processing */
1433 static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1434 struct mvneta_rx_queue *rxq)
1436 struct net_device *dev = pp->dev;
1437 int rx_done;
1438 u32 rcvd_pkts = 0;
1439 u32 rcvd_bytes = 0;
1441 /* Get number of received packets */
1442 rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
1444 if (rx_todo > rx_done)
1445 rx_todo = rx_done;
1447 rx_done = 0;
1449 /* Fairness NAPI loop */
1450 while (rx_done < rx_todo) {
1451 struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
1452 struct sk_buff *skb;
1453 unsigned char *data;
1454 dma_addr_t phys_addr;
1455 u32 rx_status;
1456 int rx_bytes, err;
1458 rx_done++;
1459 rx_status = rx_desc->status;
1460 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1461 data = (unsigned char *)rx_desc->buf_cookie;
1462 phys_addr = rx_desc->buf_phys_addr;
1464 if (!mvneta_rxq_desc_is_first_last(rx_status) ||
1465 (rx_status & MVNETA_RXD_ERR_SUMMARY)) {
1466 err_drop_frame:
1467 dev->stats.rx_errors++;
1468 mvneta_rx_error(pp, rx_desc);
1469 /* leave the descriptor untouched */
1470 continue;
1473 if (rx_bytes <= rx_copybreak) {
1474 /* better copy a small frame and not unmap the DMA region */
1475 skb = netdev_alloc_skb_ip_align(dev, rx_bytes);
1476 if (unlikely(!skb))
1477 goto err_drop_frame;
1479 dma_sync_single_range_for_cpu(dev->dev.parent,
1480 rx_desc->buf_phys_addr,
1481 MVNETA_MH_SIZE + NET_SKB_PAD,
1482 rx_bytes,
1483 DMA_FROM_DEVICE);
1484 memcpy(skb_put(skb, rx_bytes),
1485 data + MVNETA_MH_SIZE + NET_SKB_PAD,
1486 rx_bytes);
1488 skb->protocol = eth_type_trans(skb, dev);
1489 mvneta_rx_csum(pp, rx_status, skb);
1490 napi_gro_receive(&pp->napi, skb);
1492 rcvd_pkts++;
1493 rcvd_bytes += rx_bytes;
1495 /* leave the descriptor and buffer untouched */
1496 continue;
1499 /* Refill processing */
1500 err = mvneta_rx_refill(pp, rx_desc);
1501 if (err) {
1502 netdev_err(dev, "Linux processing - Can't refill\n");
1503 rxq->missed++;
1504 goto err_drop_frame;
1507 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1508 if (!skb)
1509 goto err_drop_frame;
1511 dma_unmap_single(dev->dev.parent, phys_addr,
1512 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
1514 rcvd_pkts++;
1515 rcvd_bytes += rx_bytes;
1517 /* Linux processing */
1518 skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD);
1519 skb_put(skb, rx_bytes);
1521 skb->protocol = eth_type_trans(skb, dev);
1523 mvneta_rx_csum(pp, rx_status, skb);
1525 napi_gro_receive(&pp->napi, skb);
1528 if (rcvd_pkts) {
1529 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1531 u64_stats_update_begin(&stats->syncp);
1532 stats->rx_packets += rcvd_pkts;
1533 stats->rx_bytes += rcvd_bytes;
1534 u64_stats_update_end(&stats->syncp);
1537 /* Update rxq management counters */
1538 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1540 return rx_done;
1543 static inline void
1544 mvneta_tso_put_hdr(struct sk_buff *skb,
1545 struct mvneta_port *pp, struct mvneta_tx_queue *txq)
1547 struct mvneta_tx_desc *tx_desc;
1548 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1550 txq->tx_skb[txq->txq_put_index] = NULL;
1551 tx_desc = mvneta_txq_next_desc_get(txq);
1552 tx_desc->data_size = hdr_len;
1553 tx_desc->command = mvneta_skb_tx_csum(pp, skb);
1554 tx_desc->command |= MVNETA_TXD_F_DESC;
1555 tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
1556 txq->txq_put_index * TSO_HEADER_SIZE;
1557 mvneta_txq_inc_put(txq);
1560 static inline int
1561 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
1562 struct sk_buff *skb, char *data, int size,
1563 bool last_tcp, bool is_last)
1565 struct mvneta_tx_desc *tx_desc;
1567 tx_desc = mvneta_txq_next_desc_get(txq);
1568 tx_desc->data_size = size;
1569 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
1570 size, DMA_TO_DEVICE);
1571 if (unlikely(dma_mapping_error(dev->dev.parent,
1572 tx_desc->buf_phys_addr))) {
1573 mvneta_txq_desc_put(txq);
1574 return -ENOMEM;
1577 tx_desc->command = 0;
1578 txq->tx_skb[txq->txq_put_index] = NULL;
1580 if (last_tcp) {
1581 /* last descriptor in the TCP packet */
1582 tx_desc->command = MVNETA_TXD_L_DESC;
1584 /* last descriptor in SKB */
1585 if (is_last)
1586 txq->tx_skb[txq->txq_put_index] = skb;
1588 mvneta_txq_inc_put(txq);
1589 return 0;
1592 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
1593 struct mvneta_tx_queue *txq)
1595 int total_len, data_left;
1596 int desc_count = 0;
1597 struct mvneta_port *pp = netdev_priv(dev);
1598 struct tso_t tso;
1599 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1600 int i;
1602 /* Count needed descriptors */
1603 if ((txq->count + tso_count_descs(skb)) >= txq->size)
1604 return 0;
1606 if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) {
1607 pr_info("*** Is this even possible???!?!?\n");
1608 return 0;
1611 /* Initialize the TSO handler, and prepare the first payload */
1612 tso_start(skb, &tso);
1614 total_len = skb->len - hdr_len;
1615 while (total_len > 0) {
1616 char *hdr;
1618 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1619 total_len -= data_left;
1620 desc_count++;
1622 /* prepare packet headers: MAC + IP + TCP */
1623 hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
1624 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
1626 mvneta_tso_put_hdr(skb, pp, txq);
1628 while (data_left > 0) {
1629 int size;
1630 desc_count++;
1632 size = min_t(int, tso.size, data_left);
1634 if (mvneta_tso_put_data(dev, txq, skb,
1635 tso.data, size,
1636 size == data_left,
1637 total_len == 0))
1638 goto err_release;
1639 data_left -= size;
1641 tso_build_data(skb, &tso, size);
1645 return desc_count;
1647 err_release:
1648 /* Release all used data descriptors; header descriptors must not
1649 * be DMA-unmapped.
1651 for (i = desc_count - 1; i >= 0; i--) {
1652 struct mvneta_tx_desc *tx_desc = txq->descs + i;
1653 if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
1654 dma_unmap_single(pp->dev->dev.parent,
1655 tx_desc->buf_phys_addr,
1656 tx_desc->data_size,
1657 DMA_TO_DEVICE);
1658 mvneta_txq_desc_put(txq);
1660 return 0;
1663 /* Handle tx fragmentation processing */
1664 static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
1665 struct mvneta_tx_queue *txq)
1667 struct mvneta_tx_desc *tx_desc;
1668 int i, nr_frags = skb_shinfo(skb)->nr_frags;
1670 for (i = 0; i < nr_frags; i++) {
1671 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1672 void *addr = page_address(frag->page.p) + frag->page_offset;
1674 tx_desc = mvneta_txq_next_desc_get(txq);
1675 tx_desc->data_size = frag->size;
1677 tx_desc->buf_phys_addr =
1678 dma_map_single(pp->dev->dev.parent, addr,
1679 tx_desc->data_size, DMA_TO_DEVICE);
1681 if (dma_mapping_error(pp->dev->dev.parent,
1682 tx_desc->buf_phys_addr)) {
1683 mvneta_txq_desc_put(txq);
1684 goto error;
1687 if (i == nr_frags - 1) {
1688 /* Last descriptor */
1689 tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
1690 txq->tx_skb[txq->txq_put_index] = skb;
1691 } else {
1692 /* Descriptor in the middle: Not First, Not Last */
1693 tx_desc->command = 0;
1694 txq->tx_skb[txq->txq_put_index] = NULL;
1696 mvneta_txq_inc_put(txq);
1699 return 0;
1701 error:
1702 /* Release all descriptors that were used to map fragments of
1703 * this packet, as well as the corresponding DMA mappings
1705 for (i = i - 1; i >= 0; i--) {
1706 tx_desc = txq->descs + i;
1707 dma_unmap_single(pp->dev->dev.parent,
1708 tx_desc->buf_phys_addr,
1709 tx_desc->data_size,
1710 DMA_TO_DEVICE);
1711 mvneta_txq_desc_put(txq);
1714 return -ENOMEM;
1717 /* Main tx processing */
1718 static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
1720 struct mvneta_port *pp = netdev_priv(dev);
1721 u16 txq_id = skb_get_queue_mapping(skb);
1722 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
1723 struct mvneta_tx_desc *tx_desc;
1724 int len = skb->len;
1725 int frags = 0;
1726 u32 tx_cmd;
1728 if (!netif_running(dev))
1729 goto out;
1731 if (skb_is_gso(skb)) {
1732 frags = mvneta_tx_tso(skb, dev, txq);
1733 goto out;
1736 frags = skb_shinfo(skb)->nr_frags + 1;
1738 /* Get a descriptor for the first part of the packet */
1739 tx_desc = mvneta_txq_next_desc_get(txq);
1741 tx_cmd = mvneta_skb_tx_csum(pp, skb);
1743 tx_desc->data_size = skb_headlen(skb);
1745 tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
1746 tx_desc->data_size,
1747 DMA_TO_DEVICE);
1748 if (unlikely(dma_mapping_error(dev->dev.parent,
1749 tx_desc->buf_phys_addr))) {
1750 mvneta_txq_desc_put(txq);
1751 frags = 0;
1752 goto out;
1755 if (frags == 1) {
1756 /* First and Last descriptor */
1757 tx_cmd |= MVNETA_TXD_FLZ_DESC;
1758 tx_desc->command = tx_cmd;
1759 txq->tx_skb[txq->txq_put_index] = skb;
1760 mvneta_txq_inc_put(txq);
1761 } else {
1762 /* First but not Last */
1763 tx_cmd |= MVNETA_TXD_F_DESC;
1764 txq->tx_skb[txq->txq_put_index] = NULL;
1765 mvneta_txq_inc_put(txq);
1766 tx_desc->command = tx_cmd;
1767 /* Continue with other skb fragments */
1768 if (mvneta_tx_frag_process(pp, skb, txq)) {
1769 dma_unmap_single(dev->dev.parent,
1770 tx_desc->buf_phys_addr,
1771 tx_desc->data_size,
1772 DMA_TO_DEVICE);
1773 mvneta_txq_desc_put(txq);
1774 frags = 0;
1775 goto out;
1779 out:
1780 if (frags > 0) {
1781 struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
1782 struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
1784 txq->count += frags;
1785 mvneta_txq_pend_desc_add(pp, txq, frags);
1787 if (txq->count >= txq->tx_stop_threshold)
1788 netif_tx_stop_queue(nq);
1790 u64_stats_update_begin(&stats->syncp);
1791 stats->tx_packets++;
1792 stats->tx_bytes += len;
1793 u64_stats_update_end(&stats->syncp);
1794 } else {
1795 dev->stats.tx_dropped++;
1796 dev_kfree_skb_any(skb);
1799 return NETDEV_TX_OK;
1803 /* Free tx resources, when resetting a port */
1804 static void mvneta_txq_done_force(struct mvneta_port *pp,
1805 struct mvneta_tx_queue *txq)
1808 int tx_done = txq->count;
1810 mvneta_txq_bufs_free(pp, txq, tx_done);
1812 /* reset txq */
1813 txq->count = 0;
1814 txq->txq_put_index = 0;
1815 txq->txq_get_index = 0;
1818 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
1819 * must be a valid cause according to MVNETA_TX_INTR_MASK_ALL.
1821 static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
1823 struct mvneta_tx_queue *txq;
1824 struct netdev_queue *nq;
1826 while (cause_tx_done) {
1827 txq = mvneta_tx_done_policy(pp, cause_tx_done);
1829 nq = netdev_get_tx_queue(pp->dev, txq->id);
1830 __netif_tx_lock(nq, smp_processor_id());
1832 if (txq->count)
1833 mvneta_txq_done(pp, txq);
1835 __netif_tx_unlock(nq);
1836 cause_tx_done &= ~((1 << txq->id));
1840 /* Compute the crc8 of the specified address, using an algorithm unique
1841 * to this hw (per the hw spec) and different from the generic crc8 helpers.
1843 static int mvneta_addr_crc(unsigned char *addr)
1845 int crc = 0;
1846 int i;
1848 for (i = 0; i < ETH_ALEN; i++) {
1849 int j;
1851 crc = (crc ^ addr[i]) << 8;
1852 for (j = 7; j >= 0; j--) {
1853 if (crc & (0x100 << j))
1854 crc ^= 0x107 << j;
1858 return crc;
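/* The loop above is a bitwise MSB-first CRC-8 over the six MAC address bytes
 * using polynomial 0x107 (x^8 + x^2 + x + 1), which is why the kernel's
 * generic crc8 helpers are not reused here.
 */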
1861 /* This method controls the net device special MAC multicast support.
1862 * The Special Multicast Table for MAC addresses supports MAC of the form
1863 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1864 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1865 * Table entries in the DA-Filter table. This method sets the appropriate
1866 * Special Multicast Table entry.
1868 static void mvneta_set_special_mcast_addr(struct mvneta_port *pp,
1869 unsigned char last_byte,
1870 int queue)
1872 unsigned int smc_table_reg;
1873 unsigned int tbl_offset;
1874 unsigned int reg_offset;
1876 /* Register offset from SMC table base */
1877 tbl_offset = (last_byte / 4);
1878 /* Entry offset within the above reg */
1879 reg_offset = last_byte % 4;
1881 smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST
1882 + tbl_offset * 4));
1884 if (queue == -1)
1885 smc_table_reg &= ~(0xff << (8 * reg_offset));
1886 else {
1887 smc_table_reg &= ~(0xff << (8 * reg_offset));
1888 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1891 mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4,
1892 smc_table_reg);
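/* For example, for 01:00:5e:00:00:42 the last byte is 0x42, so tbl_offset is
 * 0x10 and reg_offset is 2: the accept/queue byte lives in bits 23:16 of the
 * register at MVNETA_DA_FILT_SPEC_MCAST + 0x40.
 */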
1895 /* This method controls the network device Other MAC multicast support.
1896 * The Other Multicast Table is used for multicast of another type.
1897 * A CRC-8 is used as an index to the Other Multicast Table entries
1898 * in the DA-Filter table.
1899 * The method gets the CRC-8 value from the calling routine and
1900 * sets the appropriate Other Multicast Table entry according to the
1901 * specified CRC-8.
1903 static void mvneta_set_other_mcast_addr(struct mvneta_port *pp,
1904 unsigned char crc8,
1905 int queue)
1907 unsigned int omc_table_reg;
1908 unsigned int tbl_offset;
1909 unsigned int reg_offset;
1911 tbl_offset = (crc8 / 4) * 4; /* Register offset from OMC table base */
1912 reg_offset = crc8 % 4; /* Entry offset within the above reg */
1914 omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset);
1916 if (queue == -1) {
1917 /* Clear accepts frame bit at specified Other DA table entry */
1918 omc_table_reg &= ~(0xff << (8 * reg_offset));
1919 } else {
1920 omc_table_reg &= ~(0xff << (8 * reg_offset));
1921 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
1924 mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg);
1927 /* The network device supports multicast using two tables:
1928 * 1) Special Multicast Table for MAC addresses of the form
1929 * 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0xFF).
1930 * The MAC DA[7:0] bits are used as a pointer to the Special Multicast
1931 * Table entries in the DA-Filter table.
1932 * 2) Other Multicast Table for multicast of another type. A CRC-8 value
1933 * is used as an index to the Other Multicast Table entries in the
1934 * DA-Filter table.
1936 static int mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr,
1937 int queue)
1939 unsigned char crc_result = 0;
1941 if (memcmp(p_addr, "\x01\x00\x5e\x00\x00", 5) == 0) {
1942 mvneta_set_special_mcast_addr(pp, p_addr[5], queue);
1943 return 0;
1946 crc_result = mvneta_addr_crc(p_addr);
1947 if (queue == -1) {
1948 if (pp->mcast_count[crc_result] == 0) {
1949 netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n",
1950 crc_result);
1951 return -EINVAL;
1954 pp->mcast_count[crc_result]--;
1955 if (pp->mcast_count[crc_result] != 0) {
1956 netdev_info(pp->dev,
1957 "After delete there are %d valid Mcast for crc8=0x%02x\n",
1958 pp->mcast_count[crc_result], crc_result);
1959 return -EINVAL;
1961 } else
1962 pp->mcast_count[crc_result]++;
1964 mvneta_set_other_mcast_addr(pp, crc_result, queue);
1966 return 0;
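/* Usage sketch with hypothetical addresses: 01:00:5e:00:00:01 matches the
 * five-byte special prefix and is programmed into the Special Multicast
 * Table via its last byte, whereas 01:00:5e:7f:ff:fa does not match, so it
 * is hashed with mvneta_addr_crc() and reference-counted in mcast_count[]
 * before the Other Multicast Table entry is written.
 */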
1969 /* Configure the filtering mode of the Ethernet port */
1970 static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp,
1971 int is_promisc)
1973 u32 port_cfg_reg, val;
1975 port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG);
1977 val = mvreg_read(pp, MVNETA_TYPE_PRIO);
1979 /* Set / Clear UPM bit in port configuration register */
1980 if (is_promisc) {
1981 /* Accept all Unicast addresses */
1982 port_cfg_reg |= MVNETA_UNI_PROMISC_MODE;
1983 val |= MVNETA_FORCE_UNI;
1984 mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff);
1985 mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff);
1986 } else {
1987 /* Reject all Unicast addresses */
1988 port_cfg_reg &= ~MVNETA_UNI_PROMISC_MODE;
1989 val &= ~MVNETA_FORCE_UNI;
1992 mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg);
1993 mvreg_write(pp, MVNETA_TYPE_PRIO, val);
1996 /* register unicast and multicast addresses */
1997 static void mvneta_set_rx_mode(struct net_device *dev)
1999 struct mvneta_port *pp = netdev_priv(dev);
2000 struct netdev_hw_addr *ha;
2002 if (dev->flags & IFF_PROMISC) {
2003 /* Accept all: Multicast + Unicast */
2004 mvneta_rx_unicast_promisc_set(pp, 1);
2005 mvneta_set_ucast_table(pp, rxq_def);
2006 mvneta_set_special_mcast_table(pp, rxq_def);
2007 mvneta_set_other_mcast_table(pp, rxq_def);
2008 } else {
2009 /* Accept single Unicast */
2010 mvneta_rx_unicast_promisc_set(pp, 0);
2011 mvneta_set_ucast_table(pp, -1);
2012 mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
2014 if (dev->flags & IFF_ALLMULTI) {
2015 /* Accept all multicast */
2016 mvneta_set_special_mcast_table(pp, rxq_def);
2017 mvneta_set_other_mcast_table(pp, rxq_def);
2018 } else {
2019 /* Accept only initialized multicast */
2020 mvneta_set_special_mcast_table(pp, -1);
2021 mvneta_set_other_mcast_table(pp, -1);
2023 if (!netdev_mc_empty(dev)) {
2024 netdev_for_each_mc_addr(ha, dev) {
2025 mvneta_mcast_addr_set(pp, ha->addr,
2026 rxq_def);
2033 /* Interrupt handling - the callback for request_irq() */
2034 static irqreturn_t mvneta_isr(int irq, void *dev_id)
2036 struct mvneta_port *pp = (struct mvneta_port *)dev_id;
2038 /* Mask all interrupts */
2039 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2041 napi_schedule(&pp->napi);
2043 return IRQ_HANDLED;
2046 /* NAPI handler
2047 * Bits 0 - 7 of the causeRxTx register indicate that packets were
2048 * transmitted on the corresponding TXQ (Bit 0 is for TX queue 1).
2049 * Bits 8 - 15 of the causeRxTx register indicate that packets were
2050 * received on the corresponding RXQ (Bit 8 is for RX queue 0).
2051 * Each CPU has its own causeRxTx register.
2053 static int mvneta_poll(struct napi_struct *napi, int budget)
2055 int rx_done = 0;
2056 u32 cause_rx_tx;
2057 unsigned long flags;
2058 struct mvneta_port *pp = netdev_priv(napi->dev);
2060 if (!netif_running(pp->dev)) {
2061 napi_complete(napi);
2062 return rx_done;
2065 /* Read cause register */
2066 cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) &
2067 (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
2069 /* Release Tx descriptors */
2070 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
2071 mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
2072 cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
2075 /* For the case where the last mvneta_poll did not process all
2076 * RX packets
2078 cause_rx_tx |= pp->cause_rx_tx;
2079 if (rxq_number > 1) {
2080 while ((cause_rx_tx & MVNETA_RX_INTR_MASK_ALL) && (budget > 0)) {
2081 int count;
2082 struct mvneta_rx_queue *rxq;
2083 /* get rx queue number from cause_rx_tx */
2084 rxq = mvneta_rx_policy(pp, cause_rx_tx);
2085 if (!rxq)
2086 break;
2088 /* process the packet in that rx queue */
2089 count = mvneta_rx(pp, budget, rxq);
2090 rx_done += count;
2091 budget -= count;
2092 if (budget > 0) {
2093 /* clear the bit corresponding to this
2094 * rx queue in the cause register, so
2095 * that the next iteration will find
2096 * the next rx queue on which packets
2097 * were received
2099 cause_rx_tx &= ~((1 << rxq->id) << 8);
2102 } else {
2103 rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
2104 budget -= rx_done;
2107 if (budget > 0) {
2108 cause_rx_tx = 0;
2109 napi_complete(napi);
2110 local_irq_save(flags);
2111 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2112 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
2113 local_irq_restore(flags);
2116 pp->cause_rx_tx = cause_rx_tx;
2117 return rx_done;
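/* Cause register layout assumed by the poll loop above, one bit per queue:
 *
 *	TX done:  bits 0..7,  tested as (1 << txq->id)
 *	RX ready: bits 8..15, tested as (1 << rxq->id) << 8
 *
 * which is why a fully serviced RX queue is dropped from cause_rx_tx with
 * cause_rx_tx &= ~((1 << rxq->id) << 8) before the next loop iteration.
 */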
2120 /* Handle rxq fill: allocates rxq skbs; called when initializing a port */
2121 static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
2122 int num)
2124 int i;
2126 for (i = 0; i < num; i++) {
2127 memset(rxq->descs + i, 0, sizeof(struct mvneta_rx_desc));
2128 if (mvneta_rx_refill(pp, rxq->descs + i) != 0) {
2129 netdev_err(pp->dev, "%s:rxq %d, %d of %d buffs filled\n",
2130 __func__, rxq->id, i, num);
2131 break;
2135 /* Add this number of RX descriptors as non-occupied (ready to
2136 * receive packets)
2138 mvneta_rxq_non_occup_desc_add(pp, rxq, i);
2140 return i;
2143 /* Free all packets pending transmit from all TXQs and reset TX port */
2144 static void mvneta_tx_reset(struct mvneta_port *pp)
2146 int queue;
2148 /* free the skb's in the tx ring */
2149 for (queue = 0; queue < txq_number; queue++)
2150 mvneta_txq_done_force(pp, &pp->txqs[queue]);
2152 mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);
2153 mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
2156 static void mvneta_rx_reset(struct mvneta_port *pp)
2158 mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
2159 mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);
2162 /* Rx/Tx queue initialization/cleanup methods */
2164 /* Create a specified RX queue */
2165 static int mvneta_rxq_init(struct mvneta_port *pp,
2166 struct mvneta_rx_queue *rxq)
2169 rxq->size = pp->rx_ring_size;
2171 /* Allocate memory for RX descriptors */
2172 rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2173 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2174 &rxq->descs_phys, GFP_KERNEL);
2175 if (rxq->descs == NULL)
2176 return -ENOMEM;
2178 BUG_ON(rxq->descs !=
2179 PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2181 rxq->last_desc = rxq->size - 1;
2183 /* Set Rx descriptors queue starting address */
2184 mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
2185 mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
2187 /* Set Offset */
2188 mvneta_rxq_offset_set(pp, rxq, NET_SKB_PAD);
2190 /* Set coalescing pkts and time */
2191 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2192 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2194 /* Fill RXQ with buffers from RX pool */
2195 mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size));
2196 mvneta_rxq_bm_disable(pp, rxq);
2197 mvneta_rxq_fill(pp, rxq, rxq->size);
2199 return 0;
2202 /* Cleanup Rx queue */
2203 static void mvneta_rxq_deinit(struct mvneta_port *pp,
2204 struct mvneta_rx_queue *rxq)
2206 mvneta_rxq_drop_pkts(pp, rxq);
2208 if (rxq->descs)
2209 dma_free_coherent(pp->dev->dev.parent,
2210 rxq->size * MVNETA_DESC_ALIGNED_SIZE,
2211 rxq->descs,
2212 rxq->descs_phys);
2214 rxq->descs = NULL;
2215 rxq->last_desc = 0;
2216 rxq->next_desc_to_proc = 0;
2217 rxq->descs_phys = 0;
2220 /* Create and initialize a tx queue */
2221 static int mvneta_txq_init(struct mvneta_port *pp,
2222 struct mvneta_tx_queue *txq)
2224 txq->size = pp->tx_ring_size;
2226 /* A queue must always have room for at least one skb.
2227 * Therefore, stop the queue when the number of free entries reaches
2228 * the maximum number of descriptors per skb.
2230 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
2231 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
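/* Worked example (sizes illustrative only): with a 512-entry ring and,
 * hypothetically, MVNETA_MAX_SKB_DESCS == 32, tx_stop_threshold becomes
 * 512 - 32 == 480 in-use descriptors and tx_wake_threshold 480 / 2 == 240.
 */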
2234 /* Allocate memory for TX descriptors */
2235 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
2236 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2237 &txq->descs_phys, GFP_KERNEL);
2238 if (txq->descs == NULL)
2239 return -ENOMEM;
2241 /* Make sure descriptor address is cache line size aligned */
2242 BUG_ON(txq->descs !=
2243 PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
2245 txq->last_desc = txq->size - 1;
2247 /* Set maximum bandwidth for enabled TXQs */
2248 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
2249 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
2251 /* Set Tx descriptors queue starting address */
2252 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
2253 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
2255 txq->tx_skb = kmalloc(txq->size * sizeof(*txq->tx_skb), GFP_KERNEL);
2256 if (txq->tx_skb == NULL) {
2257 dma_free_coherent(pp->dev->dev.parent,
2258 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2259 txq->descs, txq->descs_phys);
2260 return -ENOMEM;
2263 /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
2264 txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
2265 txq->size * TSO_HEADER_SIZE,
2266 &txq->tso_hdrs_phys, GFP_KERNEL);
2267 if (txq->tso_hdrs == NULL) {
2268 kfree(txq->tx_skb);
2269 dma_free_coherent(pp->dev->dev.parent,
2270 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2271 txq->descs, txq->descs_phys);
2272 return -ENOMEM;
2274 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2276 return 0;
2279 /* Free allocated resources when mvneta_txq_init() fails to allocate memory */
2280 static void mvneta_txq_deinit(struct mvneta_port *pp,
2281 struct mvneta_tx_queue *txq)
2283 kfree(txq->tx_skb);
2285 if (txq->tso_hdrs)
2286 dma_free_coherent(pp->dev->dev.parent,
2287 txq->size * TSO_HEADER_SIZE,
2288 txq->tso_hdrs, txq->tso_hdrs_phys);
2289 if (txq->descs)
2290 dma_free_coherent(pp->dev->dev.parent,
2291 txq->size * MVNETA_DESC_ALIGNED_SIZE,
2292 txq->descs, txq->descs_phys);
2294 txq->descs = NULL;
2295 txq->last_desc = 0;
2296 txq->next_desc_to_proc = 0;
2297 txq->descs_phys = 0;
2299 /* Set minimum bandwidth for disabled TXQs */
2300 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
2301 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
2303 /* Set Tx descriptors queue starting address and size */
2304 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
2305 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
2308 /* Cleanup all Tx queues */
2309 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
2311 int queue;
2313 for (queue = 0; queue < txq_number; queue++)
2314 mvneta_txq_deinit(pp, &pp->txqs[queue]);
2317 /* Cleanup all Rx queues */
2318 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
2320 int queue;
2322 for (queue = 0; queue < rxq_number; queue++)
2323 mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
2327 /* Init all Rx queues */
2328 static int mvneta_setup_rxqs(struct mvneta_port *pp)
2330 int queue;
2332 for (queue = 0; queue < rxq_number; queue++) {
2333 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
2334 if (err) {
2335 netdev_err(pp->dev, "%s: can't create rxq=%d\n",
2336 __func__, queue);
2337 mvneta_cleanup_rxqs(pp);
2338 return err;
2342 return 0;
2345 /* Init all tx queues */
2346 static int mvneta_setup_txqs(struct mvneta_port *pp)
2348 int queue;
2350 for (queue = 0; queue < txq_number; queue++) {
2351 int err = mvneta_txq_init(pp, &pp->txqs[queue]);
2352 if (err) {
2353 netdev_err(pp->dev, "%s: can't create txq=%d\n",
2354 __func__, queue);
2355 mvneta_cleanup_txqs(pp);
2356 return err;
2360 return 0;
2363 static void mvneta_start_dev(struct mvneta_port *pp)
2365 mvneta_max_rx_size_set(pp, pp->pkt_size);
2366 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
2368 /* start the Rx/Tx activity */
2369 mvneta_port_enable(pp);
2371 /* Enable polling on the port */
2372 napi_enable(&pp->napi);
2374 /* Unmask interrupts */
2375 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2376 MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number));
2378 phy_start(pp->phy_dev);
2379 netif_tx_start_all_queues(pp->dev);
2382 static void mvneta_stop_dev(struct mvneta_port *pp)
2384 phy_stop(pp->phy_dev);
2386 napi_disable(&pp->napi);
2388 netif_carrier_off(pp->dev);
2390 mvneta_port_down(pp);
2391 netif_tx_stop_all_queues(pp->dev);
2393 /* Stop the port activity */
2394 mvneta_port_disable(pp);
2396 /* Clear all ethernet port interrupts */
2397 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
2398 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2400 /* Mask all ethernet port interrupts */
2401 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2402 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2403 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2405 mvneta_tx_reset(pp);
2406 mvneta_rx_reset(pp);
2409 /* Return positive if MTU is valid */
2410 static int mvneta_check_mtu_valid(struct net_device *dev, int mtu)
2412 if (mtu < 68) {
2413 netdev_err(dev, "cannot change mtu to less than 68\n");
2414 return -EINVAL;
2417 /* 9676 == 9700 - 20 and rounding to 8 */
2418 if (mtu > 9676) {
2419 netdev_info(dev, "Illegal MTU value %d, round to 9676\n", mtu);
2420 mtu = 9676;
2423 if (!IS_ALIGNED(MVNETA_RX_PKT_SIZE(mtu), 8)) {
2424 netdev_info(dev, "Illegal MTU value %d, rounding to %d\n",
2425 mtu, ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8));
2426 mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8);
2429 return mtu;
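/* Rounding behaviour used above (generic kernel helpers): ALIGN(x, 8)
 * rounds x up to the next multiple of 8 and IS_ALIGNED(x, 8) checks
 * divisibility, e.g. IS_ALIGNED(1517, 8) is false and ALIGN(1517, 8) == 1520.
 */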
2432 /* Change the device mtu */
2433 static int mvneta_change_mtu(struct net_device *dev, int mtu)
2435 struct mvneta_port *pp = netdev_priv(dev);
2436 int ret;
2438 mtu = mvneta_check_mtu_valid(dev, mtu);
2439 if (mtu < 0)
2440 return -EINVAL;
2442 dev->mtu = mtu;
2444 if (!netif_running(dev)) {
2445 netdev_update_features(dev);
2446 return 0;
2449 /* The interface is running, so we have to force a
2450 * reallocation of the queues
2452 mvneta_stop_dev(pp);
2454 mvneta_cleanup_txqs(pp);
2455 mvneta_cleanup_rxqs(pp);
2457 pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
2458 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2459 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2461 ret = mvneta_setup_rxqs(pp);
2462 if (ret) {
2463 netdev_err(dev, "unable to setup rxqs after MTU change\n");
2464 return ret;
2467 ret = mvneta_setup_txqs(pp);
2468 if (ret) {
2469 netdev_err(dev, "unable to setup txqs after MTU change\n");
2470 return ret;
2473 mvneta_start_dev(pp);
2474 mvneta_port_up(pp);
2476 netdev_update_features(dev);
2478 return 0;
2481 static netdev_features_t mvneta_fix_features(struct net_device *dev,
2482 netdev_features_t features)
2484 struct mvneta_port *pp = netdev_priv(dev);
2486 if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
2487 features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
2488 netdev_info(dev,
2489 "Disable IP checksum for MTU greater than %dB\n",
2490 pp->tx_csum_limit);
2493 return features;
2496 /* Get mac address */
2497 static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
2499 u32 mac_addr_l, mac_addr_h;
2501 mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
2502 mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
2503 addr[0] = (mac_addr_h >> 24) & 0xFF;
2504 addr[1] = (mac_addr_h >> 16) & 0xFF;
2505 addr[2] = (mac_addr_h >> 8) & 0xFF;
2506 addr[3] = mac_addr_h & 0xFF;
2507 addr[4] = (mac_addr_l >> 8) & 0xFF;
2508 addr[5] = mac_addr_l & 0xFF;
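/* Register layout implied by the reads above: MVNETA_MAC_ADDR_HIGH holds
 * bytes 0..3 of the MAC (byte 0 in bits 31..24) and MVNETA_MAC_ADDR_LOW
 * holds bytes 4..5 (byte 4 in bits 15..8). A hypothetical address
 * 00:50:43:12:34:56 would therefore read back as high == 0x00504312 and
 * low == 0x3456.
 */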
2511 /* Handle setting mac address */
2512 static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
2514 struct mvneta_port *pp = netdev_priv(dev);
2515 struct sockaddr *sockaddr = addr;
2516 int ret;
2518 ret = eth_prepare_mac_addr_change(dev, addr);
2519 if (ret < 0)
2520 return ret;
2521 /* Remove previous address table entry */
2522 mvneta_mac_addr_set(pp, dev->dev_addr, -1);
2524 /* Set new addr in hw */
2525 mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
2527 eth_commit_mac_addr_change(dev, addr);
2528 return 0;
2531 static void mvneta_adjust_link(struct net_device *ndev)
2533 struct mvneta_port *pp = netdev_priv(ndev);
2534 struct phy_device *phydev = pp->phy_dev;
2535 int status_change = 0;
2537 if (phydev->link) {
2538 if ((pp->speed != phydev->speed) ||
2539 (pp->duplex != phydev->duplex)) {
2540 u32 val;
2542 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2543 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2544 MVNETA_GMAC_CONFIG_GMII_SPEED |
2545 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2546 MVNETA_GMAC_AN_SPEED_EN |
2547 MVNETA_GMAC_AN_DUPLEX_EN);
2549 if (phydev->duplex)
2550 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2552 if (phydev->speed == SPEED_1000)
2553 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2554 else if (phydev->speed == SPEED_100)
2555 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2557 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2559 pp->duplex = phydev->duplex;
2560 pp->speed = phydev->speed;
2564 if (phydev->link != pp->link) {
2565 if (!phydev->link) {
2566 pp->duplex = -1;
2567 pp->speed = 0;
2570 pp->link = phydev->link;
2571 status_change = 1;
2574 if (status_change) {
2575 if (phydev->link) {
2576 u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2577 val |= (MVNETA_GMAC_FORCE_LINK_PASS |
2578 MVNETA_GMAC_FORCE_LINK_DOWN);
2579 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
2580 mvneta_port_up(pp);
2581 netdev_info(pp->dev, "link up\n");
2582 } else {
2583 mvneta_port_down(pp);
2584 netdev_info(pp->dev, "link down\n");
2589 static int mvneta_mdio_probe(struct mvneta_port *pp)
2591 struct phy_device *phy_dev;
2593 phy_dev = of_phy_connect(pp->dev, pp->phy_node, mvneta_adjust_link, 0,
2594 pp->phy_interface);
2595 if (!phy_dev) {
2596 netdev_err(pp->dev, "could not find the PHY\n");
2597 return -ENODEV;
2600 phy_dev->supported &= PHY_GBIT_FEATURES;
2601 phy_dev->advertising = phy_dev->supported;
2603 pp->phy_dev = phy_dev;
2604 pp->link = 0;
2605 pp->duplex = 0;
2606 pp->speed = 0;
2608 return 0;
2611 static void mvneta_mdio_remove(struct mvneta_port *pp)
2613 phy_disconnect(pp->phy_dev);
2614 pp->phy_dev = NULL;
2617 static int mvneta_open(struct net_device *dev)
2619 struct mvneta_port *pp = netdev_priv(dev);
2620 int ret;
2622 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2623 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
2624 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2626 ret = mvneta_setup_rxqs(pp);
2627 if (ret)
2628 return ret;
2630 ret = mvneta_setup_txqs(pp);
2631 if (ret)
2632 goto err_cleanup_rxqs;
2634 /* Connect to port interrupt line */
2635 ret = request_irq(pp->dev->irq, mvneta_isr, 0,
2636 MVNETA_DRIVER_NAME, pp);
2637 if (ret) {
2638 netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
2639 goto err_cleanup_txqs;
2642 /* By default, the link is down */
2643 netif_carrier_off(pp->dev);
2645 ret = mvneta_mdio_probe(pp);
2646 if (ret < 0) {
2647 netdev_err(dev, "cannot probe MDIO bus\n");
2648 goto err_free_irq;
2651 mvneta_start_dev(pp);
2653 return 0;
2655 err_free_irq:
2656 free_irq(pp->dev->irq, pp);
2657 err_cleanup_txqs:
2658 mvneta_cleanup_txqs(pp);
2659 err_cleanup_rxqs:
2660 mvneta_cleanup_rxqs(pp);
2661 return ret;
2664 /* Stop the port, free port interrupt line */
2665 static int mvneta_stop(struct net_device *dev)
2667 struct mvneta_port *pp = netdev_priv(dev);
2669 mvneta_stop_dev(pp);
2670 mvneta_mdio_remove(pp);
2671 free_irq(dev->irq, pp);
2672 mvneta_cleanup_rxqs(pp);
2673 mvneta_cleanup_txqs(pp);
2675 return 0;
2678 static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2680 struct mvneta_port *pp = netdev_priv(dev);
2681 int ret;
2683 if (!pp->phy_dev)
2684 return -ENOTSUPP;
2686 ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
2687 if (!ret)
2688 mvneta_adjust_link(dev);
2690 return ret;
2693 /* Ethtool methods */
2695 /* Get settings (phy address, speed) for ethtool */
2696 int mvneta_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2698 struct mvneta_port *pp = netdev_priv(dev);
2700 if (!pp->phy_dev)
2701 return -ENODEV;
2703 return phy_ethtool_gset(pp->phy_dev, cmd);
2706 /* Set settings (phy address, speed) for ethtool */
2707 int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2709 struct mvneta_port *pp = netdev_priv(dev);
2711 if (!pp->phy_dev)
2712 return -ENODEV;
2714 return phy_ethtool_sset(pp->phy_dev, cmd);
2717 /* Set interrupt coalescing for ethtool */
2718 static int mvneta_ethtool_set_coalesce(struct net_device *dev,
2719 struct ethtool_coalesce *c)
2721 struct mvneta_port *pp = netdev_priv(dev);
2722 int queue;
2724 for (queue = 0; queue < rxq_number; queue++) {
2725 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2726 rxq->time_coal = c->rx_coalesce_usecs;
2727 rxq->pkts_coal = c->rx_max_coalesced_frames;
2728 mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
2729 mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
2732 for (queue = 0; queue < txq_number; queue++) {
2733 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2734 txq->done_pkts_coal = c->tx_max_coalesced_frames;
2735 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
2738 return 0;
2741 /* Get coalescing for ethtool */
2742 static int mvneta_ethtool_get_coalesce(struct net_device *dev,
2743 struct ethtool_coalesce *c)
2745 struct mvneta_port *pp = netdev_priv(dev);
2747 c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
2748 c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
2750 c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
2751 return 0;
2755 static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
2756 struct ethtool_drvinfo *drvinfo)
2758 strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
2759 sizeof(drvinfo->driver));
2760 strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
2761 sizeof(drvinfo->version));
2762 strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
2763 sizeof(drvinfo->bus_info));
2767 static void mvneta_ethtool_get_ringparam(struct net_device *netdev,
2768 struct ethtool_ringparam *ring)
2770 struct mvneta_port *pp = netdev_priv(netdev);
2772 ring->rx_max_pending = MVNETA_MAX_RXD;
2773 ring->tx_max_pending = MVNETA_MAX_TXD;
2774 ring->rx_pending = pp->rx_ring_size;
2775 ring->tx_pending = pp->tx_ring_size;
2778 static int mvneta_ethtool_set_ringparam(struct net_device *dev,
2779 struct ethtool_ringparam *ring)
2781 struct mvneta_port *pp = netdev_priv(dev);
2783 if ((ring->rx_pending == 0) || (ring->tx_pending == 0))
2784 return -EINVAL;
2785 pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
2786 ring->rx_pending : MVNETA_MAX_RXD;
2788 pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
2789 MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
2790 if (pp->tx_ring_size != ring->tx_pending)
2791 netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
2792 pp->tx_ring_size, ring->tx_pending);
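/* clamp_t() keeps the request inside [MVNETA_MAX_SKB_DESCS * 2,
 * MVNETA_MAX_TXD]; e.g. if that lower bound were 64 (illustrative value),
 * a request of 16 would be raised to 64 and anything above MVNETA_MAX_TXD
 * capped, either of which triggers the warning above.
 */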
2794 if (netif_running(dev)) {
2795 mvneta_stop(dev);
2796 if (mvneta_open(dev)) {
2797 netdev_err(dev,
2798 "error on opening device after ring param change\n");
2799 return -ENOMEM;
2803 return 0;
2806 static const struct net_device_ops mvneta_netdev_ops = {
2807 .ndo_open = mvneta_open,
2808 .ndo_stop = mvneta_stop,
2809 .ndo_start_xmit = mvneta_tx,
2810 .ndo_set_rx_mode = mvneta_set_rx_mode,
2811 .ndo_set_mac_address = mvneta_set_mac_addr,
2812 .ndo_change_mtu = mvneta_change_mtu,
2813 .ndo_fix_features = mvneta_fix_features,
2814 .ndo_get_stats64 = mvneta_get_stats64,
2815 .ndo_do_ioctl = mvneta_ioctl,
2818 const struct ethtool_ops mvneta_eth_tool_ops = {
2819 .get_link = ethtool_op_get_link,
2820 .get_settings = mvneta_ethtool_get_settings,
2821 .set_settings = mvneta_ethtool_set_settings,
2822 .set_coalesce = mvneta_ethtool_set_coalesce,
2823 .get_coalesce = mvneta_ethtool_get_coalesce,
2824 .get_drvinfo = mvneta_ethtool_get_drvinfo,
2825 .get_ringparam = mvneta_ethtool_get_ringparam,
2826 .set_ringparam = mvneta_ethtool_set_ringparam,
2829 /* Initialize hw */
2830 static int mvneta_init(struct device *dev, struct mvneta_port *pp)
2832 int queue;
2834 /* Disable port */
2835 mvneta_port_disable(pp);
2837 /* Set port default values */
2838 mvneta_defaults_set(pp);
2840 pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
2841 GFP_KERNEL);
2842 if (!pp->txqs)
2843 return -ENOMEM;
2845 /* Initialize TX descriptor rings */
2846 for (queue = 0; queue < txq_number; queue++) {
2847 struct mvneta_tx_queue *txq = &pp->txqs[queue];
2848 txq->id = queue;
2849 txq->size = pp->tx_ring_size;
2850 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
2853 pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
2854 GFP_KERNEL);
2855 if (!pp->rxqs)
2856 return -ENOMEM;
2858 /* Create Rx descriptor rings */
2859 for (queue = 0; queue < rxq_number; queue++) {
2860 struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
2861 rxq->id = queue;
2862 rxq->size = pp->rx_ring_size;
2863 rxq->pkts_coal = MVNETA_RX_COAL_PKTS;
2864 rxq->time_coal = MVNETA_RX_COAL_USEC;
2867 return 0;
2870 /* platform glue : initialize decoding windows */
2871 static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
2872 const struct mbus_dram_target_info *dram)
2874 u32 win_enable;
2875 u32 win_protect;
2876 int i;
2878 for (i = 0; i < 6; i++) {
2879 mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
2880 mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
2882 if (i < 4)
2883 mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
2886 win_enable = 0x3f;
2887 win_protect = 0;
2889 for (i = 0; i < dram->num_cs; i++) {
2890 const struct mbus_dram_window *cs = dram->cs + i;
2891 mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
2892 (cs->mbus_attr << 8) | dram->mbus_dram_target_id);
2894 mvreg_write(pp, MVNETA_WIN_SIZE(i),
2895 (cs->size - 1) & 0xffff0000);
2897 win_enable &= ~(1 << i);
2898 win_protect |= 3 << (2 * i);
2901 mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
2902 mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
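/* Reading of the window setup above: win_enable starts at 0x3f (all six
 * windows disabled) and one bit is cleared per DRAM chip-select that gets
 * a window, while win_protect accumulates a 2-bit field of 0x3 per window.
 * With two chip-selects, for example, win_enable ends up as 0x3c and
 * win_protect as 0xf.
 */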
2905 /* Power up the port */
2906 static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
2908 u32 ctrl;
2910 /* MAC Cause register should be cleared */
2911 mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
2913 ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
2915 /* Even though it might look weird, when we're configured in
2916 * SGMII or QSGMII mode, the RGMII bit needs to be set.
2918 switch (phy_mode) {
2919 case PHY_INTERFACE_MODE_QSGMII:
2920 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
2921 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2922 break;
2923 case PHY_INTERFACE_MODE_SGMII:
2924 mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
2925 ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
2926 break;
2927 case PHY_INTERFACE_MODE_RGMII:
2928 case PHY_INTERFACE_MODE_RGMII_ID:
2929 ctrl |= MVNETA_GMAC2_PORT_RGMII;
2930 break;
2931 default:
2932 return -EINVAL;
2935 /* Cancel Port Reset */
2936 ctrl &= ~MVNETA_GMAC2_PORT_RESET;
2937 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);
2939 while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
2940 MVNETA_GMAC2_PORT_RESET) != 0)
2941 continue;
2943 return 0;
2946 /* Device initialization routine */
2947 static int mvneta_probe(struct platform_device *pdev)
2949 const struct mbus_dram_target_info *dram_target_info;
2950 struct resource *res;
2951 struct device_node *dn = pdev->dev.of_node;
2952 struct device_node *phy_node;
2953 struct mvneta_port *pp;
2954 struct net_device *dev;
2955 const char *dt_mac_addr;
2956 char hw_mac_addr[ETH_ALEN];
2957 const char *mac_from;
2958 int phy_mode;
2959 int err;
2961 /* Our multiqueue support is not complete, so for now, only
2962 * allow the usage of the first RX queue
2964 if (rxq_def != 0) {
2965 dev_err(&pdev->dev, "Invalid rxq_def argument: %d\n", rxq_def);
2966 return -EINVAL;
2969 dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
2970 if (!dev)
2971 return -ENOMEM;
2973 dev->irq = irq_of_parse_and_map(dn, 0);
2974 if (dev->irq == 0) {
2975 err = -EINVAL;
2976 goto err_free_netdev;
2979 phy_node = of_parse_phandle(dn, "phy", 0);
2980 if (!phy_node) {
2981 if (!of_phy_is_fixed_link(dn)) {
2982 dev_err(&pdev->dev, "no PHY specified\n");
2983 err = -ENODEV;
2984 goto err_free_irq;
2987 err = of_phy_register_fixed_link(dn);
2988 if (err < 0) {
2989 dev_err(&pdev->dev, "cannot register fixed PHY\n");
2990 goto err_free_irq;
2993 /* In the case of a fixed PHY, the DT node associated
2994 * with the PHY is the Ethernet MAC DT node.
2996 phy_node = dn;
2999 phy_mode = of_get_phy_mode(dn);
3000 if (phy_mode < 0) {
3001 dev_err(&pdev->dev, "incorrect phy-mode\n");
3002 err = -EINVAL;
3003 goto err_free_irq;
3006 dev->tx_queue_len = MVNETA_MAX_TXD;
3007 dev->watchdog_timeo = 5 * HZ;
3008 dev->netdev_ops = &mvneta_netdev_ops;
3010 dev->ethtool_ops = &mvneta_eth_tool_ops;
3012 pp = netdev_priv(dev);
3013 pp->phy_node = phy_node;
3014 pp->phy_interface = phy_mode;
3016 pp->clk = devm_clk_get(&pdev->dev, NULL);
3017 if (IS_ERR(pp->clk)) {
3018 err = PTR_ERR(pp->clk);
3019 goto err_free_irq;
3022 clk_prepare_enable(pp->clk);
3024 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3025 pp->base = devm_ioremap_resource(&pdev->dev, res);
3026 if (IS_ERR(pp->base)) {
3027 err = PTR_ERR(pp->base);
3028 goto err_clk;
3031 /* Alloc per-cpu stats */
3032 pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
3033 if (!pp->stats) {
3034 err = -ENOMEM;
3035 goto err_clk;
3038 dt_mac_addr = of_get_mac_address(dn);
3039 if (dt_mac_addr) {
3040 mac_from = "device tree";
3041 memcpy(dev->dev_addr, dt_mac_addr, ETH_ALEN);
3042 } else {
3043 mvneta_get_mac_addr(pp, hw_mac_addr);
3044 if (is_valid_ether_addr(hw_mac_addr)) {
3045 mac_from = "hardware";
3046 memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
3047 } else {
3048 mac_from = "random";
3049 eth_hw_addr_random(dev);
3053 if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
3054 pp->tx_csum_limit = 1600;
3056 pp->tx_ring_size = MVNETA_MAX_TXD;
3057 pp->rx_ring_size = MVNETA_MAX_RXD;
3059 pp->dev = dev;
3060 SET_NETDEV_DEV(dev, &pdev->dev);
3062 err = mvneta_init(&pdev->dev, pp);
3063 if (err < 0)
3064 goto err_free_stats;
3066 err = mvneta_port_power_up(pp, phy_mode);
3067 if (err < 0) {
3068 dev_err(&pdev->dev, "can't power up port\n");
3069 goto err_free_stats;
3072 dram_target_info = mv_mbus_dram_info();
3073 if (dram_target_info)
3074 mvneta_conf_mbus_windows(pp, dram_target_info);
3076 netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
3078 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
3079 dev->hw_features |= dev->features;
3080 dev->vlan_features |= dev->features;
3081 dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
3082 dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
3084 err = register_netdev(dev);
3085 if (err < 0) {
3086 dev_err(&pdev->dev, "failed to register\n");
3087 goto err_free_stats;
3090 netdev_info(dev, "Using %s mac address %pM\n", mac_from,
3091 dev->dev_addr);
3093 platform_set_drvdata(pdev, pp->dev);
3095 return 0;
3097 err_free_stats:
3098 free_percpu(pp->stats);
3099 err_clk:
3100 clk_disable_unprepare(pp->clk);
3101 err_free_irq:
3102 irq_dispose_mapping(dev->irq);
3103 err_free_netdev:
3104 free_netdev(dev);
3105 return err;
3108 /* Device removal routine */
3109 static int mvneta_remove(struct platform_device *pdev)
3111 struct net_device *dev = platform_get_drvdata(pdev);
3112 struct mvneta_port *pp = netdev_priv(dev);
3114 unregister_netdev(dev);
3115 clk_disable_unprepare(pp->clk);
3116 free_percpu(pp->stats);
3117 irq_dispose_mapping(dev->irq);
3118 free_netdev(dev);
3120 return 0;
3123 static const struct of_device_id mvneta_match[] = {
3124 { .compatible = "marvell,armada-370-neta" },
3125 { .compatible = "marvell,armada-xp-neta" },
3128 MODULE_DEVICE_TABLE(of, mvneta_match);
3130 static struct platform_driver mvneta_driver = {
3131 .probe = mvneta_probe,
3132 .remove = mvneta_remove,
3133 .driver = {
3134 .name = MVNETA_DRIVER_NAME,
3135 .of_match_table = mvneta_match,
3139 module_platform_driver(mvneta_driver);
3141 MODULE_DESCRIPTION("Marvell NETA Ethernet Driver - www.marvell.com");
3142 MODULE_AUTHOR("Rami Rosen <rosenr@marvell.com>, Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
3143 MODULE_LICENSE("GPL");
3145 module_param(rxq_number, int, S_IRUGO);
3146 module_param(txq_number, int, S_IRUGO);
3148 module_param(rxq_def, int, S_IRUGO);
3149 module_param(rx_copybreak, int, S_IRUGO | S_IWUSR);