// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 */

/* Qualcomm Technologies, Inc. EMAC Ethernet Controller MAC layer support
 */

#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <net/ip6_checksum.h>
#include "emac.h"
#include "emac-sgmii.h"

/* EMAC_MAC_CTRL */
#define SINGLE_PAUSE_MODE 0x10000000
#define DEBUG_MODE 0x08000000
#define BROAD_EN 0x04000000
#define MULTI_ALL 0x02000000
#define RX_CHKSUM_EN 0x01000000
#define HUGE 0x00800000
#define SPEED(x) (((x) & 0x3) << 20)
#define SPEED_MASK SPEED(0x3)
#define SIMR 0x00080000
#define TPAUSE 0x00010000
#define PROM_MODE 0x00008000
#define VLAN_STRIP 0x00004000
#define PRLEN_BMSK 0x00003c00
#define PRLEN_SHFT 10
#define HUGEN 0x00000200
#define FLCHK 0x00000100
#define PCRCE 0x00000080
#define CRCE 0x00000040
#define FULLD 0x00000020
#define MAC_LP_EN 0x00000010
#define RXFC 0x00000008
#define TXFC 0x00000004
#define RXEN 0x00000002
#define TXEN 0x00000001

/* EMAC_DESC_CTRL_3 */
#define RFD_RING_SIZE_BMSK 0xfff

/* EMAC_DESC_CTRL_4 */
#define RX_BUFFER_SIZE_BMSK 0xffff

/* EMAC_DESC_CTRL_6 */
#define RRD_RING_SIZE_BMSK 0xfff

/* EMAC_DESC_CTRL_9 */
#define TPD_RING_SIZE_BMSK 0xffff

/* EMAC_TXQ_CTRL_0 */
#define NUM_TXF_BURST_PREF_BMSK 0xffff0000
#define NUM_TXF_BURST_PREF_SHFT 16
#define LS_8023_SP 0x80
#define TXQ_MODE 0x40
#define TXQ_EN 0x20
#define IP_OP_SP 0x10
#define NUM_TPD_BURST_PREF_BMSK 0xf
#define NUM_TPD_BURST_PREF_SHFT 0

/* EMAC_TXQ_CTRL_1 */
#define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff

/* EMAC_TXQ_CTRL_2 */
#define TXF_HWM_BMSK 0xfff0000
#define TXF_LWM_BMSK 0xfff

/* EMAC_RXQ_CTRL_0 */
#define RXQ_EN BIT(31)
#define CUT_THRU_EN BIT(30)
#define RSS_HASH_EN BIT(29)
#define NUM_RFD_BURST_PREF_BMSK 0x3f00000
#define NUM_RFD_BURST_PREF_SHFT 20
#define IDT_TABLE_SIZE_BMSK 0x1ff00
#define IDT_TABLE_SIZE_SHFT 8
#define SP_IPV6 0x80

/* EMAC_RXQ_CTRL_1 */
#define JUMBO_1KAH_BMSK 0xf000
#define JUMBO_1KAH_SHFT 12
#define RFD_PREF_LOW_TH 0x10
#define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0
#define RFD_PREF_LOW_THRESHOLD_SHFT 6
#define RFD_PREF_UP_TH 0x10
#define RFD_PREF_UP_THRESHOLD_BMSK 0x3f
#define RFD_PREF_UP_THRESHOLD_SHFT 0

/* EMAC_RXQ_CTRL_2 */
#define RXF_DOF_THRESHOLD 0x1a0
#define RXF_DOF_THRESHOLD_BMSK 0xfff0000
#define RXF_DOF_THRESHOLD_SHFT 16
#define RXF_UOF_THRESHOLD 0xbe
#define RXF_UOF_THRESHOLD_BMSK 0xfff
#define RXF_UOF_THRESHOLD_SHFT 0

/* EMAC_RXQ_CTRL_3 */
#define RXD_TIMER_BMSK 0xffff0000
#define RXD_THRESHOLD_BMSK 0xfff
#define RXD_THRESHOLD_SHFT 0

/* EMAC_DMA_CTRL */
#define DMAW_DLY_CNT_BMSK 0xf0000
#define DMAW_DLY_CNT_SHFT 16
#define DMAR_DLY_CNT_BMSK 0xf800
#define DMAR_DLY_CNT_SHFT 11
#define DMAR_REQ_PRI 0x400
#define REGWRBLEN_BMSK 0x380
#define REGWRBLEN_SHFT 7
#define REGRDBLEN_BMSK 0x70
#define REGRDBLEN_SHFT 4
#define OUT_ORDER_MODE 0x4
#define ENH_ORDER_MODE 0x2
#define IN_ORDER_MODE 0x1

/* EMAC_MAILBOX_13 */
#define RFD3_PROC_IDX_BMSK 0xfff0000
#define RFD3_PROC_IDX_SHFT 16
#define RFD3_PROD_IDX_BMSK 0xfff
#define RFD3_PROD_IDX_SHFT 0

/* EMAC_MAILBOX_2 */
#define NTPD_CONS_IDX_BMSK 0xffff0000
#define NTPD_CONS_IDX_SHFT 16

/* EMAC_MAILBOX_3 */
#define RFD0_CONS_IDX_BMSK 0xfff
#define RFD0_CONS_IDX_SHFT 0

/* EMAC_MAILBOX_11 */
#define H3TPD_PROD_IDX_BMSK 0xffff0000
#define H3TPD_PROD_IDX_SHFT 16

/* EMAC_AXI_MAST_CTRL */
#define DATA_BYTE_SWAP 0x8
#define MAX_BOUND 0x2
#define MAX_BTYPE 0x1

/* EMAC_MAILBOX_12 */
#define H3TPD_CONS_IDX_BMSK 0xffff0000
#define H3TPD_CONS_IDX_SHFT 16

/* EMAC_MAILBOX_9 */
#define H2TPD_PROD_IDX_BMSK 0xffff
#define H2TPD_PROD_IDX_SHFT 0

/* EMAC_MAILBOX_10 */
#define H1TPD_CONS_IDX_BMSK 0xffff0000
#define H1TPD_CONS_IDX_SHFT 16
#define H2TPD_CONS_IDX_BMSK 0xffff
#define H2TPD_CONS_IDX_SHFT 0

/* EMAC_ATHR_HEADER_CTRL */
#define HEADER_CNT_EN 0x2
#define HEADER_ENABLE 0x1

/* EMAC_MAILBOX_0 */
#define RFD0_PROC_IDX_BMSK 0xfff0000
#define RFD0_PROC_IDX_SHFT 16
#define RFD0_PROD_IDX_BMSK 0xfff
#define RFD0_PROD_IDX_SHFT 0

/* EMAC_MAILBOX_5 */
#define RFD1_PROC_IDX_BMSK 0xfff0000
#define RFD1_PROC_IDX_SHFT 16
#define RFD1_PROD_IDX_BMSK 0xfff
#define RFD1_PROD_IDX_SHFT 0

/* EMAC_MISC_CTRL */
#define RX_UNCPL_INT_EN 0x1

/* EMAC_MAILBOX_7 */
#define RFD2_CONS_IDX_BMSK 0xfff0000
#define RFD2_CONS_IDX_SHFT 16
#define RFD1_CONS_IDX_BMSK 0xfff
#define RFD1_CONS_IDX_SHFT 0

/* EMAC_MAILBOX_8 */
#define RFD3_CONS_IDX_BMSK 0xfff
#define RFD3_CONS_IDX_SHFT 0

/* EMAC_MAILBOX_15 */
#define NTPD_PROD_IDX_BMSK 0xffff
#define NTPD_PROD_IDX_SHFT 0

/* EMAC_MAILBOX_16 */
#define H1TPD_PROD_IDX_BMSK 0xffff
#define H1TPD_PROD_IDX_SHFT 0

#define RXQ0_RSS_HSTYP_IPV6_TCP_EN 0x20
#define RXQ0_RSS_HSTYP_IPV6_EN 0x10
#define RXQ0_RSS_HSTYP_IPV4_TCP_EN 0x8
#define RXQ0_RSS_HSTYP_IPV4_EN 0x4

/* EMAC_EMAC_WRAPPER_TX_TS_INX */
#define EMAC_WRAPPER_TX_TS_EMPTY BIT(31)
#define EMAC_WRAPPER_TX_TS_INX_BMSK 0xffff

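/* Per-skb private state kept in skb->cb; apparently used to match a
 * transmitted skb to its descriptor slot when polling the hardware for
 * TX timestamps (see EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD below).
 */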
struct emac_skb_cb {
	u32 tpd_idx;
	unsigned long jiffies;
};

#define EMAC_SKB_CB(skb) ((struct emac_skb_cb *)(skb)->cb)
#define EMAC_RSS_IDT_SIZE 256
#define JUMBO_1KAH 0x4
#define RXD_TH 0x100
#define EMAC_TPD_LAST_FRAGMENT 0x80000000
#define EMAC_TPD_TSTAMP_SAVE 0x80000000

/* EMAC Errors in emac_rrd.word[3] */
#define EMAC_RRD_L4F BIT(14)
#define EMAC_RRD_IPF BIT(15)
#define EMAC_RRD_CRC BIT(21)
#define EMAC_RRD_FAE BIT(22)
#define EMAC_RRD_TRN BIT(23)
#define EMAC_RRD_RNT BIT(24)
#define EMAC_RRD_INC BIT(25)
#define EMAC_RRD_FOV BIT(29)
#define EMAC_RRD_LEN BIT(30)

/* Error bits that will result in a received frame being discarded */
#define EMAC_RRD_ERROR (EMAC_RRD_IPF | EMAC_RRD_CRC | EMAC_RRD_FAE | \
			EMAC_RRD_TRN | EMAC_RRD_RNT | EMAC_RRD_INC | \
			EMAC_RRD_FOV | EMAC_RRD_LEN)
#define EMAC_RRD_STATS_DW_IDX 3

#define EMAC_RRD(RXQ, SIZE, IDX) ((RXQ)->rrd.v_addr + (SIZE * (IDX)))
#define EMAC_RFD(RXQ, SIZE, IDX) ((RXQ)->rfd.v_addr + (SIZE * (IDX)))
#define EMAC_TPD(TXQ, SIZE, IDX) ((TXQ)->tpd.v_addr + (SIZE * (IDX)))

#define GET_RFD_BUFFER(RXQ, IDX) (&((RXQ)->rfd.rfbuff[(IDX)]))
#define GET_TPD_BUFFER(RTQ, IDX) (&((RTQ)->tpd.tpbuff[(IDX)]))

#define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD 8

#define ISR_RX_PKT (\
	RX_PKT_INT0 |\
	RX_PKT_INT1 |\
	RX_PKT_INT2 |\
	RX_PKT_INT3)

void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
{
	u32 crc32, bit, reg, mta;

	/* Calculate the CRC of the MAC address */
	crc32 = ether_crc(ETH_ALEN, addr);

	/* The HASH Table is an array of 2 32-bit registers. It is
	 * treated like an array of 64 bits (BitArray[hash_value]).
	 * Use the upper 6 bits of the above CRC as the hash value.
	 */
	reg = (crc32 >> 31) & 0x1;
	bit = (crc32 >> 26) & 0x1F;

	mta = readl(adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
	mta |= BIT(bit);
	writel(mta, adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2));
}

void emac_mac_multicast_addr_clear(struct emac_adapter *adpt)
{
	writel(0, adpt->base + EMAC_HASH_TAB_REG0);
	writel(0, adpt->base + EMAC_HASH_TAB_REG1);
}

/* definitions for RSS */
#define EMAC_RSS_KEY(_i, _type) \
		(EMAC_RSS_KEY0 + ((_i) * sizeof(_type)))
#define EMAC_RSS_TBL(_i, _type) \
		(EMAC_IDT_TABLE0 + ((_i) * sizeof(_type)))

/* Config MAC modes */
void emac_mac_mode_config(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	u32 mac;

	mac = readl(adpt->base + EMAC_MAC_CTRL);
	mac &= ~(VLAN_STRIP | PROM_MODE | MULTI_ALL | MAC_LP_EN);

	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		mac |= VLAN_STRIP;

	if (netdev->flags & IFF_PROMISC)
		mac |= PROM_MODE;

	if (netdev->flags & IFF_ALLMULTI)
		mac |= MULTI_ALL;

	writel(mac, adpt->base + EMAC_MAC_CTRL);
}

/* Config descriptor rings */
static void emac_mac_dma_rings_config(struct emac_adapter *adpt)
{
	/* TPD (Transmit Packet Descriptor) */
	writel(upper_32_bits(adpt->tx_q.tpd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_1);

	writel(lower_32_bits(adpt->tx_q.tpd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_8);

	writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_9);

	/* RFD (Receive Free Descriptor) & RRD (Receive Return Descriptor) */
	writel(upper_32_bits(adpt->rx_q.rfd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_0);

	writel(lower_32_bits(adpt->rx_q.rfd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_2);
	writel(lower_32_bits(adpt->rx_q.rrd.dma_addr),
	       adpt->base + EMAC_DESC_CTRL_5);

	writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_3);
	writel(adpt->rx_q.rrd.count & RRD_RING_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_6);

	writel(adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK,
	       adpt->base + EMAC_DESC_CTRL_4);

	writel(0, adpt->base + EMAC_DESC_CTRL_11);

	/* Load all of the base addresses above and ensure that triggering HW to
	 * read ring pointers is flushed
	 */
	writel(1, adpt->base + EMAC_INTER_SRAM_PART9);
}

/* Config transmit parameters */
static void emac_mac_tx_config(struct emac_adapter *adpt)
{
	u32 val;

	writel((EMAC_MAX_TX_OFFLOAD_THRESH >> 3) &
	       JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK, adpt->base + EMAC_TXQ_CTRL_1);

	val = (adpt->tpd_burst << NUM_TPD_BURST_PREF_SHFT) &
	       NUM_TPD_BURST_PREF_BMSK;

	val |= TXQ_MODE | LS_8023_SP;
	val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) &
		NUM_TXF_BURST_PREF_BMSK;

	writel(val, adpt->base + EMAC_TXQ_CTRL_0);
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_2,
			  (TXF_HWM_BMSK | TXF_LWM_BMSK), 0);
}

/* Config receive parameters */
static void emac_mac_rx_config(struct emac_adapter *adpt)
{
	u32 val;

	val = (adpt->rfd_burst << NUM_RFD_BURST_PREF_SHFT) &
	       NUM_RFD_BURST_PREF_BMSK;
	val |= (SP_IPV6 | CUT_THRU_EN);

	writel(val, adpt->base + EMAC_RXQ_CTRL_0);

	val = readl(adpt->base + EMAC_RXQ_CTRL_1);
	val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK |
		 RFD_PREF_UP_THRESHOLD_BMSK);
	val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) |
		(RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) |
		(RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT);
	writel(val, adpt->base + EMAC_RXQ_CTRL_1);

	val = readl(adpt->base + EMAC_RXQ_CTRL_2);
	val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK);
	val |= (RXF_DOF_THRESHOLD << RXF_DOF_THRESHOLD_SHFT) |
		(RXF_UOF_THRESHOLD << RXF_UOF_THRESHOLD_SHFT);
	writel(val, adpt->base + EMAC_RXQ_CTRL_2);

	val = readl(adpt->base + EMAC_RXQ_CTRL_3);
	val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK);
	val |= RXD_TH << RXD_THRESHOLD_SHFT;
	writel(val, adpt->base + EMAC_RXQ_CTRL_3);
}

/* Config dma */
static void emac_mac_dma_config(struct emac_adapter *adpt)
{
	u32 dma_ctrl = DMAR_REQ_PRI;

	switch (adpt->dma_order) {
	case emac_dma_ord_in:
		dma_ctrl |= IN_ORDER_MODE;
		break;
	case emac_dma_ord_enh:
		dma_ctrl |= ENH_ORDER_MODE;
		break;
	case emac_dma_ord_out:
		dma_ctrl |= OUT_ORDER_MODE;
		break;
	default:
		break;
	}

	dma_ctrl |= (((u32)adpt->dmar_block) << REGRDBLEN_SHFT) &
		     REGRDBLEN_BMSK;
	dma_ctrl |= (((u32)adpt->dmaw_block) << REGWRBLEN_SHFT) &
		     REGWRBLEN_BMSK;
	dma_ctrl |= (((u32)adpt->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) &
		     DMAR_DLY_CNT_BMSK;
	dma_ctrl |= (((u32)adpt->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) &
		     DMAW_DLY_CNT_BMSK;

	/* config DMA and ensure that configuration is flushed to HW */
	writel(dma_ctrl, adpt->base + EMAC_DMA_CTRL);
}

/* set MAC address */
static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr)
{
	u32 sta;

	/* for example: 00-A0-C6-11-22-33
	 * 0<-->C6112233, 1<-->00A0.
	 */

	/* low 32bit word */
	sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) |
	      (((u32)addr[4]) << 8) | (((u32)addr[5]));
	writel(sta, adpt->base + EMAC_MAC_STA_ADDR0);

	/* high 32bit word */
	sta = (((u32)addr[0]) << 8) | (u32)addr[1];
	writel(sta, adpt->base + EMAC_MAC_STA_ADDR1);
}

static void emac_mac_config(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	unsigned int max_frame;
	u32 val;

	emac_set_mac_address(adpt, netdev->dev_addr);

	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
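	/* The receive buffer either keeps the driver default size or, for
	 * larger MTUs, grows to hold a full frame rounded up to an 8-byte
	 * boundary for the DMA engine.
	 */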
	adpt->rxbuf_size = netdev->mtu > EMAC_DEF_RX_BUF_SIZE ?
		ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE;

	emac_mac_dma_rings_config(adpt);

	writel(netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
	       adpt->base + EMAC_MAX_FRAM_LEN_CTRL);

	emac_mac_tx_config(adpt);
	emac_mac_rx_config(adpt);
	emac_mac_dma_config(adpt);

	val = readl(adpt->base + EMAC_AXI_MAST_CTRL);
	val &= ~(DATA_BYTE_SWAP | MAX_BOUND);
	val |= MAX_BTYPE;
	writel(val, adpt->base + EMAC_AXI_MAST_CTRL);
	writel(0, adpt->base + EMAC_CLK_GATE_CTRL);
	writel(RX_UNCPL_INT_EN, adpt->base + EMAC_MISC_CTRL);
}

void emac_mac_reset(struct emac_adapter *adpt)
{
	emac_mac_stop(adpt);

	emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, SOFT_RST);
	usleep_range(100, 150); /* reset may take up to 100usec */

	/* interrupt clear-on-read */
	emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
}

static void emac_mac_start(struct emac_adapter *adpt)
{
	struct phy_device *phydev = adpt->phydev;
	u32 mac, csr1;

	/* enable tx queue */
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, 0, TXQ_EN);

	/* enable rx queue */
	emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, 0, RXQ_EN);

	/* enable mac control */
	mac = readl(adpt->base + EMAC_MAC_CTRL);
	csr1 = readl(adpt->csr + EMAC_EMAC_WRAPPER_CSR1);

	mac |= TXEN | RXEN; /* enable RX/TX */

	/* Configure MAC flow control. If set to automatic, then match
	 * whatever the PHY does. Otherwise, enable or disable it, depending
	 * on what the user configured via ethtool.
	 */
	mac &= ~(RXFC | TXFC);

	if (adpt->automatic) {
		/* If it's set to automatic, then update our local values */
		adpt->rx_flow_control = phydev->pause;
		adpt->tx_flow_control = phydev->pause != phydev->asym_pause;
	}
	mac |= adpt->rx_flow_control ? RXFC : 0;
	mac |= adpt->tx_flow_control ? TXFC : 0;

	/* setup link speed */
	mac &= ~SPEED_MASK;
	if (phydev->speed == SPEED_1000) {
		mac |= SPEED(2);
		csr1 |= FREQ_MODE;
	} else {
		mac |= SPEED(1);
		csr1 &= ~FREQ_MODE;
	}

	if (phydev->duplex == DUPLEX_FULL)
		mac |= FULLD;
	else
		mac &= ~FULLD;

	/* other parameters */
	mac |= (CRCE | PCRCE);
	mac |= ((adpt->preamble << PRLEN_SHFT) & PRLEN_BMSK);
	mac |= BROAD_EN;
	mac |= FLCHK;
	mac &= ~RX_CHKSUM_EN;
	mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
		 DEBUG_MODE | SINGLE_PAUSE_MODE);
	/* Enable single-pause-frame mode if requested.
	 *
	 * If enabled, the EMAC will send a single pause frame when the RX
	 * queue is full. This normally leads to packet loss because
	 * the pause frame disables the remote MAC only for 33ms (the quanta),
	 * and then the remote MAC continues sending packets even though
	 * the RX queue is still full.
	 *
	 * If disabled, the EMAC sends a pause frame every 31ms until the RX
	 * queue is no longer full. Normally, this is the preferred
	 * method of operation. However, when the system is hung (e.g.
	 * cores are halted), the EMAC interrupt handler is never called
	 * and so the RX queue fills up quickly and stays full. The resulting
	 * non-stop "flood" of pause frames sometimes has the effect of
	 * disabling nearby switches. In some cases, other nearby switches
	 * are also affected, shutting down the entire network.
	 *
	 * The user can enable or disable single-pause-frame mode
	 * via ethtool.
	 */
	mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;

	writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);

	writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);

	/* enable interrupt read clear, low power sleep mode and
	 * the irq moderators
	 */

	writel_relaxed(adpt->irq_mod, adpt->base + EMAC_IRQ_MOD_TIM_INIT);
	writel_relaxed(INT_RD_CLR_EN | LPW_MODE | IRQ_MODERATOR_EN |
		       IRQ_MODERATOR2_EN, adpt->base + EMAC_DMA_MAS_CTRL);

	emac_mac_mode_config(adpt);

	emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
			  (HEADER_ENABLE | HEADER_CNT_EN), 0);
}

void emac_mac_stop(struct emac_adapter *adpt)
{
	emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, RXQ_EN, 0);
	emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, TXQ_EN, 0);
	emac_reg_update32(adpt->base + EMAC_MAC_CTRL, TXEN | RXEN, 0);
	usleep_range(1000, 1050); /* stopping the mac may take up to 1 msec */
}

/* Free all descriptors of given transmit queue */
static void emac_tx_q_descs_free(struct emac_adapter *adpt)
{
	struct emac_tx_queue *tx_q = &adpt->tx_q;
	unsigned int i;
	size_t size;

	/* ring already cleared, nothing to do */
	if (!tx_q->tpd.tpbuff)
		return;

	for (i = 0; i < tx_q->tpd.count; i++) {
		struct emac_buffer *tpbuf = GET_TPD_BUFFER(tx_q, i);

		if (tpbuf->dma_addr) {
			dma_unmap_single(adpt->netdev->dev.parent,
					 tpbuf->dma_addr, tpbuf->length,
					 DMA_TO_DEVICE);
			tpbuf->dma_addr = 0;
		}

		if (tpbuf->skb) {
			dev_kfree_skb_any(tpbuf->skb);
			tpbuf->skb = NULL;
		}
	}

	size = sizeof(struct emac_buffer) * tx_q->tpd.count;
	memset(tx_q->tpd.tpbuff, 0, size);

	/* clear the descriptor ring */
	memset(tx_q->tpd.v_addr, 0, tx_q->tpd.size);

	tx_q->tpd.consume_idx = 0;
	tx_q->tpd.produce_idx = 0;
}

/* Free all descriptors of given receive queue */
static void emac_rx_q_free_descs(struct emac_adapter *adpt)
{
	struct device *dev = adpt->netdev->dev.parent;
	struct emac_rx_queue *rx_q = &adpt->rx_q;
	unsigned int i;
	size_t size;

	/* ring already cleared, nothing to do */
	if (!rx_q->rfd.rfbuff)
		return;

	for (i = 0; i < rx_q->rfd.count; i++) {
		struct emac_buffer *rfbuf = GET_RFD_BUFFER(rx_q, i);

		if (rfbuf->dma_addr) {
			dma_unmap_single(dev, rfbuf->dma_addr, rfbuf->length,
					 DMA_FROM_DEVICE);
			rfbuf->dma_addr = 0;
		}

		if (rfbuf->skb) {
			dev_kfree_skb(rfbuf->skb);
			rfbuf->skb = NULL;
		}
	}

	size = sizeof(struct emac_buffer) * rx_q->rfd.count;
	memset(rx_q->rfd.rfbuff, 0, size);

	/* clear the descriptor rings */
	memset(rx_q->rrd.v_addr, 0, rx_q->rrd.size);
	rx_q->rrd.produce_idx = 0;
	rx_q->rrd.consume_idx = 0;

	memset(rx_q->rfd.v_addr, 0, rx_q->rfd.size);
	rx_q->rfd.produce_idx = 0;
	rx_q->rfd.consume_idx = 0;
}

/* Free all buffers associated with given transmit queue */
static void emac_tx_q_bufs_free(struct emac_adapter *adpt)
{
	struct emac_tx_queue *tx_q = &adpt->tx_q;

	emac_tx_q_descs_free(adpt);

	kfree(tx_q->tpd.tpbuff);
	tx_q->tpd.tpbuff = NULL;
	tx_q->tpd.v_addr = NULL;
	tx_q->tpd.dma_addr = 0;
	tx_q->tpd.size = 0;
}

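/* Note: all descriptor rings are carved out of one coherent DMA block
 * (adpt->ring_header); each ring takes its slice at the next 8-byte
 * aligned offset, tracked in ring_header->used.
 */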
/* Allocate TX descriptor ring for the given transmit queue */
static int emac_tx_q_desc_alloc(struct emac_adapter *adpt,
				struct emac_tx_queue *tx_q)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	int node = dev_to_node(adpt->netdev->dev.parent);
	size_t size;

	size = sizeof(struct emac_buffer) * tx_q->tpd.count;
	tx_q->tpd.tpbuff = kzalloc_node(size, GFP_KERNEL, node);
	if (!tx_q->tpd.tpbuff)
		return -ENOMEM;

	tx_q->tpd.size = tx_q->tpd.count * (adpt->tpd_size * 4);
	tx_q->tpd.dma_addr = ring_header->dma_addr + ring_header->used;
	tx_q->tpd.v_addr = ring_header->v_addr + ring_header->used;
	ring_header->used += ALIGN(tx_q->tpd.size, 8);
	tx_q->tpd.produce_idx = 0;
	tx_q->tpd.consume_idx = 0;

	return 0;
}

/* Free all buffers associated with given receive queue */
static void emac_rx_q_bufs_free(struct emac_adapter *adpt)
{
	struct emac_rx_queue *rx_q = &adpt->rx_q;

	emac_rx_q_free_descs(adpt);

	kfree(rx_q->rfd.rfbuff);
	rx_q->rfd.rfbuff = NULL;

	rx_q->rfd.v_addr = NULL;
	rx_q->rfd.dma_addr = 0;
	rx_q->rfd.size = 0;

	rx_q->rrd.v_addr = NULL;
	rx_q->rrd.dma_addr = 0;
	rx_q->rrd.size = 0;
}

/* Allocate RX descriptor rings for the given receive queue */
static int emac_rx_descs_alloc(struct emac_adapter *adpt)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	int node = dev_to_node(adpt->netdev->dev.parent);
	struct emac_rx_queue *rx_q = &adpt->rx_q;
	size_t size;

	size = sizeof(struct emac_buffer) * rx_q->rfd.count;
	rx_q->rfd.rfbuff = kzalloc_node(size, GFP_KERNEL, node);
	if (!rx_q->rfd.rfbuff)
		return -ENOMEM;

	rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4);
	rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4);

	rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used;
	rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used;
	ring_header->used += ALIGN(rx_q->rrd.size, 8);

	rx_q->rfd.dma_addr = ring_header->dma_addr + ring_header->used;
	rx_q->rfd.v_addr = ring_header->v_addr + ring_header->used;
	ring_header->used += ALIGN(rx_q->rfd.size, 8);

	rx_q->rrd.produce_idx = 0;
	rx_q->rrd.consume_idx = 0;

	rx_q->rfd.produce_idx = 0;
	rx_q->rfd.consume_idx = 0;

	return 0;
}

/* Allocate all TX and RX descriptor rings */
int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	struct device *dev = adpt->netdev->dev.parent;
	unsigned int num_tx_descs = adpt->tx_desc_cnt;
	unsigned int num_rx_descs = adpt->rx_desc_cnt;
	int ret;

	adpt->tx_q.tpd.count = adpt->tx_desc_cnt;

	adpt->rx_q.rrd.count = adpt->rx_desc_cnt;
	adpt->rx_q.rfd.count = adpt->rx_desc_cnt;

	/* Ring DMA buffer. Each ring may need up to 8 bytes for alignment,
	 * hence the additional padding bytes are allocated.
	 */
	ring_header->size = num_tx_descs * (adpt->tpd_size * 4) +
			    num_rx_descs * (adpt->rfd_size * 4) +
			    num_rx_descs * (adpt->rrd_size * 4) +
			    8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */

	ring_header->used = 0;
	ring_header->v_addr = dma_alloc_coherent(dev, ring_header->size,
						 &ring_header->dma_addr,
						 GFP_KERNEL);
	if (!ring_header->v_addr)
		return -ENOMEM;
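	/* Start at the first 8-byte-aligned offset within the block; the
	 * padding for this was included in ring_header->size above.
	 */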
	ring_header->used = ALIGN(ring_header->dma_addr, 8) -
			    ring_header->dma_addr;

	ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q);
	if (ret) {
		netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n");
		goto err_alloc_tx;
	}

	ret = emac_rx_descs_alloc(adpt);
	if (ret) {
		netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n");
		goto err_alloc_rx;
	}

	return 0;

err_alloc_rx:
	emac_tx_q_bufs_free(adpt);
err_alloc_tx:
	dma_free_coherent(dev, ring_header->size,
			  ring_header->v_addr, ring_header->dma_addr);

	ring_header->v_addr = NULL;
	ring_header->dma_addr = 0;
	ring_header->size = 0;
	ring_header->used = 0;

	return ret;
}

/* Free all TX and RX descriptor rings */
void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt)
{
	struct emac_ring_header *ring_header = &adpt->ring_header;
	struct device *dev = adpt->netdev->dev.parent;

	emac_tx_q_bufs_free(adpt);
	emac_rx_q_bufs_free(adpt);

	dma_free_coherent(dev, ring_header->size,
			  ring_header->v_addr, ring_header->dma_addr);

	ring_header->v_addr = NULL;
	ring_header->dma_addr = 0;
	ring_header->size = 0;
	ring_header->used = 0;
}

/* Initialize descriptor rings */
static void emac_mac_rx_tx_ring_reset_all(struct emac_adapter *adpt)
{
	unsigned int i;

	adpt->tx_q.tpd.produce_idx = 0;
	adpt->tx_q.tpd.consume_idx = 0;
	for (i = 0; i < adpt->tx_q.tpd.count; i++)
		adpt->tx_q.tpd.tpbuff[i].dma_addr = 0;

	adpt->rx_q.rrd.produce_idx = 0;
	adpt->rx_q.rrd.consume_idx = 0;
	adpt->rx_q.rfd.produce_idx = 0;
	adpt->rx_q.rfd.consume_idx = 0;
	for (i = 0; i < adpt->rx_q.rfd.count; i++)
		adpt->rx_q.rfd.rfbuff[i].dma_addr = 0;
}

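/* Note: an RFD is just the 64-bit DMA address of an empty receive buffer;
 * packet status is returned separately through the RRD ring.
 */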
/* Produce new receive free descriptor */
static void emac_mac_rx_rfd_create(struct emac_adapter *adpt,
				   struct emac_rx_queue *rx_q,
				   dma_addr_t addr)
{
	u32 *hw_rfd = EMAC_RFD(rx_q, adpt->rfd_size, rx_q->rfd.produce_idx);

	*(hw_rfd++) = lower_32_bits(addr);
	*hw_rfd = upper_32_bits(addr);

	if (++rx_q->rfd.produce_idx == rx_q->rfd.count)
		rx_q->rfd.produce_idx = 0;
}

/* Fill up receive queue's RFD with preallocated receive buffers */
static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
				     struct emac_rx_queue *rx_q)
{
	struct emac_buffer *curr_rxbuf;
	struct emac_buffer *next_rxbuf;
	unsigned int count = 0;
	u32 next_produce_idx;

	next_produce_idx = rx_q->rfd.produce_idx + 1;
	if (next_produce_idx == rx_q->rfd.count)
		next_produce_idx = 0;

	curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
	next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);

	/* Refill while the next slot is still a blank rx_buffer; one entry
	 * is always left unused so the ring never looks empty when it is
	 * actually full.
	 */
	while (!next_rxbuf->dma_addr) {
		struct sk_buff *skb;
		int ret;

		skb = netdev_alloc_skb_ip_align(adpt->netdev, adpt->rxbuf_size);
		if (!skb)
			break;

		curr_rxbuf->dma_addr =
			dma_map_single(adpt->netdev->dev.parent, skb->data,
				       adpt->rxbuf_size, DMA_FROM_DEVICE);

		ret = dma_mapping_error(adpt->netdev->dev.parent,
					curr_rxbuf->dma_addr);
		if (ret) {
			dev_kfree_skb(skb);
			break;
		}
		curr_rxbuf->skb = skb;
		curr_rxbuf->length = adpt->rxbuf_size;

		emac_mac_rx_rfd_create(adpt, rx_q, curr_rxbuf->dma_addr);
		next_produce_idx = rx_q->rfd.produce_idx + 1;
		if (next_produce_idx == rx_q->rfd.count)
			next_produce_idx = 0;

		curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx);
		next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx);
		count++;
	}

	if (count) {
		u32 prod_idx = (rx_q->rfd.produce_idx << rx_q->produce_shift) &
				rx_q->produce_mask;
		emac_reg_update32(adpt->base + rx_q->produce_reg,
				  rx_q->produce_mask, prod_idx);
	}
}

static void emac_adjust_link(struct net_device *netdev)
{
	struct emac_adapter *adpt = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link) {
		emac_mac_start(adpt);
		emac_sgmii_link_change(adpt, true);
	} else {
		emac_sgmii_link_change(adpt, false);
		emac_mac_stop(adpt);
	}

	phy_print_status(phydev);
}

/* Bringup the interface/HW */
int emac_mac_up(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;
	int ret;

	emac_mac_rx_tx_ring_reset_all(adpt);
	emac_mac_config(adpt);
	emac_mac_rx_descs_refill(adpt, &adpt->rx_q);

	adpt->phydev->irq = PHY_POLL;
	ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
				 PHY_INTERFACE_MODE_SGMII);
	if (ret) {
		netdev_err(adpt->netdev, "could not connect phy\n");
		return ret;
	}

	phy_attached_print(adpt->phydev, NULL);

	/* enable mac irq */
	writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
	writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);

	phy_start(adpt->phydev);

	napi_enable(&adpt->rx_q.napi);
	netif_start_queue(netdev);

	return 0;
}

/* Bring down the interface/HW */
void emac_mac_down(struct emac_adapter *adpt)
{
	struct net_device *netdev = adpt->netdev;

	netif_stop_queue(netdev);
	napi_disable(&adpt->rx_q.napi);

	phy_stop(adpt->phydev);

	/* Interrupts must be disabled before the PHY is disconnected, to
	 * avoid a race condition where adjust_link is null when we get
	 * an interrupt.
	 */
	writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
	writel(0, adpt->base + EMAC_INT_MASK);
	synchronize_irq(adpt->irq.irq);

	phy_disconnect(adpt->phydev);

	emac_mac_reset(adpt);

	emac_tx_q_descs_free(adpt);
	netdev_reset_queue(adpt->netdev);
	emac_rx_q_free_descs(adpt);
}

/* Consume next received packet descriptor */
static bool emac_rx_process_rrd(struct emac_adapter *adpt,
				struct emac_rx_queue *rx_q,
				struct emac_rrd *rrd)
{
	u32 *hw_rrd = EMAC_RRD(rx_q, adpt->rrd_size, rx_q->rrd.consume_idx);

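	/* word[3] carries the UPDT ("updated") flag, presumably the last
	 * word the hardware writes; check it before reading the rest of
	 * the descriptor.
	 */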
	rrd->word[3] = *(hw_rrd + 3);

	if (!RRD_UPDT(rrd))
		return false;

	rrd->word[4] = 0;
	rrd->word[5] = 0;

	rrd->word[0] = *(hw_rrd++);
	rrd->word[1] = *(hw_rrd++);
	rrd->word[2] = *(hw_rrd++);

	if (unlikely(RRD_NOR(rrd) != 1)) {
		netdev_err(adpt->netdev,
			   "error: multi-RFD not supported yet! nor:%lu\n",
			   RRD_NOR(rrd));
	}

	/* mark rrd as processed */
	RRD_UPDT_SET(rrd, 0);
	*hw_rrd = rrd->word[3];

	if (++rx_q->rrd.consume_idx == rx_q->rrd.count)
		rx_q->rrd.consume_idx = 0;

	return true;
}

/* Produce new transmit descriptor */
static void emac_tx_tpd_create(struct emac_adapter *adpt,
			       struct emac_tx_queue *tx_q, struct emac_tpd *tpd)
{
	u32 *hw_tpd;

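	/* Remember this slot so that emac_tx_tpd_mark_last() can set the
	 * last-fragment bit on it once the whole packet has been queued.
	 */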
	tx_q->tpd.last_produce_idx = tx_q->tpd.produce_idx;
	hw_tpd = EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.produce_idx);

	if (++tx_q->tpd.produce_idx == tx_q->tpd.count)
		tx_q->tpd.produce_idx = 0;

	*(hw_tpd++) = tpd->word[0];
	*(hw_tpd++) = tpd->word[1];
	*(hw_tpd++) = tpd->word[2];
	*hw_tpd = tpd->word[3];
}

/* Mark the last transmit descriptor as such (for the transmit packet) */
static void emac_tx_tpd_mark_last(struct emac_adapter *adpt,
				  struct emac_tx_queue *tx_q)
{
	u32 *hw_tpd =
		EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.last_produce_idx);
	u32 tmp_tpd;

	tmp_tpd = *(hw_tpd + 1);
	tmp_tpd |= EMAC_TPD_LAST_FRAGMENT;
	*(hw_tpd + 1) = tmp_tpd;
}

static void emac_rx_rfd_clean(struct emac_rx_queue *rx_q, struct emac_rrd *rrd)
{
	struct emac_buffer *rfbuf = rx_q->rfd.rfbuff;
	u32 consume_idx = RRD_SI(rrd);
	unsigned int i;

	for (i = 0; i < RRD_NOR(rrd); i++) {
		rfbuf[consume_idx].skb = NULL;
		if (++consume_idx == rx_q->rfd.count)
			consume_idx = 0;
	}

	rx_q->rfd.consume_idx = consume_idx;
	rx_q->rfd.process_idx = consume_idx;
}

/* Push the received skb to upper layers */
static void emac_receive_skb(struct emac_rx_queue *rx_q,
			     struct sk_buff *skb,
			     u16 vlan_tag, bool vlan_flag)
{
	if (vlan_flag) {
		u16 vlan;

		EMAC_TAG_TO_VLAN(vlan_tag, vlan);
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan);
	}

	napi_gro_receive(&rx_q->napi, skb);
}

/* Process receive event */
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
			 int *num_pkts, int max_pkts)
{
	u32 proc_idx, hw_consume_idx, num_consume_pkts;
	struct net_device *netdev = adpt->netdev;
	struct emac_buffer *rfbuf;
	unsigned int count = 0;
	struct emac_rrd rrd;
	struct sk_buff *skb;
	u32 reg;

	reg = readl_relaxed(adpt->base + rx_q->consume_reg);

	hw_consume_idx = (reg & rx_q->consume_mask) >> rx_q->consume_shift;
	num_consume_pkts = (hw_consume_idx >= rx_q->rrd.consume_idx) ?
		(hw_consume_idx - rx_q->rrd.consume_idx) :
		(hw_consume_idx + rx_q->rrd.count - rx_q->rrd.consume_idx);

	do {
		if (!num_consume_pkts)
			break;

		if (!emac_rx_process_rrd(adpt, rx_q, &rrd))
			break;

		if (likely(RRD_NOR(&rrd) == 1)) {
			/* good receive */
			rfbuf = GET_RFD_BUFFER(rx_q, RRD_SI(&rrd));
			dma_unmap_single(adpt->netdev->dev.parent,
					 rfbuf->dma_addr, rfbuf->length,
					 DMA_FROM_DEVICE);
			rfbuf->dma_addr = 0;
			skb = rfbuf->skb;
		} else {
			netdev_err(adpt->netdev,
				   "error: multi-RFD not supported yet!\n");
			break;
		}
		emac_rx_rfd_clean(rx_q, &rrd);
		num_consume_pkts--;
		count++;

		/* Due to a HW issue in L4 check sum detection (UDP/TCP frags
		 * with DF set are marked as error), drop packets based on the
		 * error mask rather than the summary bit (ignoring L4F errors)
		 */
		if (rrd.word[EMAC_RRD_STATS_DW_IDX] & EMAC_RRD_ERROR) {
			netif_dbg(adpt, rx_status, adpt->netdev,
				  "Drop error packet[RRD: 0x%x:0x%x:0x%x:0x%x]\n",
				  rrd.word[0], rrd.word[1],
				  rrd.word[2], rrd.word[3]);

			dev_kfree_skb(skb);
			continue;
		}

		skb_put(skb, RRD_PKT_SIZE(&rrd) - ETH_FCS_LEN);
		skb->dev = netdev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (netdev->features & NETIF_F_RXCSUM)
			skb->ip_summed = RRD_L4F(&rrd) ?
					 CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
				 (bool)RRD_CVTAG(&rrd));

		(*num_pkts)++;
	} while (*num_pkts < max_pkts);

	if (count) {
		proc_idx = (rx_q->rfd.process_idx << rx_q->process_shft) &
				rx_q->process_mask;
		emac_reg_update32(adpt->base + rx_q->process_reg,
				  rx_q->process_mask, proc_idx);
		emac_mac_rx_descs_refill(adpt, rx_q);
	}
}

/* get the number of free transmit descriptors */
static unsigned int emac_tpd_num_free_descs(struct emac_tx_queue *tx_q)
{
	u32 produce_idx = tx_q->tpd.produce_idx;
	u32 consume_idx = tx_q->tpd.consume_idx;

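	/* One slot is always left unused so that produce == consume
	 * unambiguously means "empty"; hence the "- 1" below.
	 */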
	return (consume_idx > produce_idx) ?
		(consume_idx - produce_idx - 1) :
		(tx_q->tpd.count + consume_idx - produce_idx - 1);
}

/* Process transmit event */
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
{
	u32 reg = readl_relaxed(adpt->base + tx_q->consume_reg);
	u32 hw_consume_idx, pkts_compl = 0, bytes_compl = 0;
	struct emac_buffer *tpbuf;

	hw_consume_idx = (reg & tx_q->consume_mask) >> tx_q->consume_shift;

	while (tx_q->tpd.consume_idx != hw_consume_idx) {
		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
		if (tpbuf->dma_addr) {
			dma_unmap_page(adpt->netdev->dev.parent,
				       tpbuf->dma_addr, tpbuf->length,
				       DMA_TO_DEVICE);
			tpbuf->dma_addr = 0;
		}

		if (tpbuf->skb) {
			pkts_compl++;
			bytes_compl += tpbuf->skb->len;
			dev_consume_skb_irq(tpbuf->skb);
			tpbuf->skb = NULL;
		}

		if (++tx_q->tpd.consume_idx == tx_q->tpd.count)
			tx_q->tpd.consume_idx = 0;
	}

	netdev_completed_queue(adpt->netdev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(adpt->netdev))
		if (emac_tpd_num_free_descs(tx_q) > (MAX_SKB_FRAGS + 1))
			netif_wake_queue(adpt->netdev);
}

/* Initialize all queue data structures */
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
				  struct emac_adapter *adpt)
{
	adpt->rx_q.netdev = adpt->netdev;
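
	/* Each ring's produce/process/consume index lives in a bit-field
	 * of a shared mailbox register; record register, mask and shift
	 * here so the hot paths can update an index with a single
	 * emac_reg_update32() call.
	 */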
	adpt->rx_q.produce_reg = EMAC_MAILBOX_0;
	adpt->rx_q.produce_mask = RFD0_PROD_IDX_BMSK;
	adpt->rx_q.produce_shift = RFD0_PROD_IDX_SHFT;

	adpt->rx_q.process_reg = EMAC_MAILBOX_0;
	adpt->rx_q.process_mask = RFD0_PROC_IDX_BMSK;
	adpt->rx_q.process_shft = RFD0_PROC_IDX_SHFT;

	adpt->rx_q.consume_reg = EMAC_MAILBOX_3;
	adpt->rx_q.consume_mask = RFD0_CONS_IDX_BMSK;
	adpt->rx_q.consume_shift = RFD0_CONS_IDX_SHFT;

	adpt->rx_q.irq = &adpt->irq;
	adpt->rx_q.intr = adpt->irq.mask & ISR_RX_PKT;

	adpt->tx_q.produce_reg = EMAC_MAILBOX_15;
	adpt->tx_q.produce_mask = NTPD_PROD_IDX_BMSK;
	adpt->tx_q.produce_shift = NTPD_PROD_IDX_SHFT;

	adpt->tx_q.consume_reg = EMAC_MAILBOX_2;
	adpt->tx_q.consume_mask = NTPD_CONS_IDX_BMSK;
	adpt->tx_q.consume_shift = NTPD_CONS_IDX_SHFT;
}

/* Fill up transmit descriptors with TSO and Checksum offload information */
static int emac_tso_csum(struct emac_adapter *adpt,
			 struct emac_tx_queue *tx_q,
			 struct sk_buff *skb,
			 struct emac_tpd *tpd)
{
	unsigned int hdr_len;
	int ret;

	if (skb_is_gso(skb)) {
		if (skb_header_cloned(skb)) {
			ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(ret))
				return ret;
		}
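
		/* Trim anything beyond the IP datagram's total length;
		 * presumably the hardware expects the frame length and
		 * tot_len to agree before segmenting.
		 */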
		if (skb->protocol == htons(ETH_P_IP)) {
			u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data)
				       + ntohs(ip_hdr(skb)->tot_len);
			if (skb->len > pkt_len)
				pskb_trim(skb, pkt_len);
		}

		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		if (unlikely(skb->len == hdr_len)) {
			/* we only need to do csum */
			netif_warn(adpt, tx_err, adpt->netdev,
				   "tso not needed for packet with 0 data\n");
			goto do_csum;
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr,
						   0, IPPROTO_TCP, 0);
			TPD_IPV4_SET(tpd, 1);
		}

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* IPv6 TSO needs an extra tpd */
			struct emac_tpd extra_tpd;

			memset(tpd, 0, sizeof(*tpd));
			memset(&extra_tpd, 0, sizeof(extra_tpd));

			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr,
						 0, IPPROTO_TCP, 0);
			TPD_PKT_LEN_SET(&extra_tpd, skb->len);
			TPD_LSO_SET(&extra_tpd, 1);
			TPD_LSOV_SET(&extra_tpd, 1);
			emac_tx_tpd_create(adpt, tx_q, &extra_tpd);
			TPD_LSOV_SET(tpd, 1);
		}

		TPD_LSO_SET(tpd, 1);
		TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb));
		TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size);
		return 0;
	}

do_csum:
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		unsigned int css, cso;

		cso = skb_transport_offset(skb);
		if (unlikely(cso & 0x1)) {
			netdev_err(adpt->netdev,
				   "error: payload offset should be even\n");
			return -EINVAL;
		}
		css = cso + skb->csum_offset;

		TPD_PAYLOAD_OFFSET_SET(tpd, cso >> 1);
		TPD_CXSUM_OFFSET_SET(tpd, css >> 1);
		TPD_CSX_SET(tpd, 1);
	}

	return 0;
}

/* Fill up transmit descriptors */
static void emac_tx_fill_tpd(struct emac_adapter *adpt,
			     struct emac_tx_queue *tx_q, struct sk_buff *skb,
			     struct emac_tpd *tpd)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int first = tx_q->tpd.produce_idx;
	unsigned int len = skb_headlen(skb);
	struct emac_buffer *tpbuf = NULL;
	unsigned int mapped_len = 0;
	unsigned int i;
	int count = 0;
	int ret;

	/* if Large Segment Offload (TSO) is enabled, map the protocol
	 * headers as their own buffer first
	 */
	if (TPD_LSO(tpd)) {
		mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb);

		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = mapped_len;
		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
					       virt_to_page(skb->data),
					       offset_in_page(skb->data),
					       tpbuf->length,
					       DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	if (mapped_len < len) {
		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = len - mapped_len;
		tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
					       virt_to_page(skb->data +
							    mapped_len),
					       offset_in_page(skb->data +
							      mapped_len),
					       tpbuf->length, DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
		tpbuf->length = skb_frag_size(frag);
		tpbuf->dma_addr = skb_frag_dma_map(adpt->netdev->dev.parent,
						   frag, 0, tpbuf->length,
						   DMA_TO_DEVICE);
		ret = dma_mapping_error(adpt->netdev->dev.parent,
					tpbuf->dma_addr);
		if (ret)
			goto error;

		TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr));
		TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr));
		TPD_BUF_LEN_SET(tpd, tpbuf->length);
		emac_tx_tpd_create(adpt, tx_q, tpd);
		count++;
	}

	/* Ensure all descriptor writes are visible before marking
	 * the last tpd
	 */
	wmb();
	emac_tx_tpd_mark_last(adpt, tx_q);

	/* The last buffer info contains the skb address,
	 * so it will be freed after unmap
	 */
	tpbuf->skb = skb;

	return;

error:
	/* One of the memory mappings failed, so undo everything */
	tx_q->tpd.produce_idx = first;

	while (count--) {
		tpbuf = GET_TPD_BUFFER(tx_q, first);
		dma_unmap_page(adpt->netdev->dev.parent, tpbuf->dma_addr,
			       tpbuf->length, DMA_TO_DEVICE);
		tpbuf->dma_addr = 0;
		tpbuf->length = 0;

		if (++first == tx_q->tpd.count)
			first = 0;
	}

	dev_kfree_skb(skb);
}

/* Transmit the packet using specified transmit queue */
int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
			 struct sk_buff *skb)
{
	struct emac_tpd tpd;
	u32 prod_idx;

	memset(&tpd, 0, sizeof(tpd));

	if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 tag;

		EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag);
		TPD_CVLAN_TAG_SET(&tpd, tag);
		TPD_INSTC_SET(&tpd, 1);
	}

	if (skb_network_offset(skb) != ETH_HLEN)
		TPD_TYP_SET(&tpd, 1);

	emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);

	netdev_sent_queue(adpt->netdev, skb->len);

	/* Make sure there are enough free descriptors to hold one
	 * maximum-sized SKB. We need one desc for each fragment, one for
	 * the checksum (emac_tso_csum), one for TSO, and one for the SKB
	 * header.
	 */
	if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3))
		netif_stop_queue(adpt->netdev);

	/* update produce idx */
	prod_idx = (tx_q->tpd.produce_idx << tx_q->produce_shift) &
		    tx_q->produce_mask;
	emac_reg_update32(adpt->base + tx_q->produce_reg,
			  tx_q->produce_mask, prod_idx);

	return NETDEV_TX_OK;
}