// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 */

#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"
#include "xgene_enet_xgmac.h"

#define RES_ENET_CSR	0
#define RES_RING_CSR	1
#define RES_RING_CMD	2
static void xgene_enet_init_bufpool(struct xgene_enet_desc_ring *buf_pool)
	struct xgene_enet_raw_desc16 *raw_desc;

	for (i = 0; i < buf_pool->slots; i++) {
		raw_desc = &buf_pool->raw_desc16[i];

		/* Hardware expects descriptor in little endian format */
		raw_desc->m0 = cpu_to_le64(i |
				SET_VAL(FPQNUM, buf_pool->dst_ring_num) |
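/*
 * Decode the BUFDATALEN field of a descriptor into the actual buffer
 * length in bytes, handling the hardware's encoded 2K/4K/16K
 * "full buffer" special cases.
 */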
static u16 xgene_enet_get_data_len(u64 bufdatalen)
	hw_len = GET_VAL(BUFDATALEN, bufdatalen);

	if (unlikely(hw_len == 0x7800)) {
	} else if (!(hw_len & BIT(14))) {
		mask = GENMASK(13, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_16K;
	} else if (!(hw_len & GENMASK(13, 12))) {
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_4K;
		mask = GENMASK(11, 0);
		return (hw_len & mask) ? (hw_len & mask) : SIZE_2K;

static u16 xgene_enet_set_data_len(u32 size)
	hw_len = (size == SIZE_4K) ? BIT(14) : 0;
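/*
 * Allocate full pages, map them for DMA and post them to the page pool
 * ring that backs oversized (jumbo) receive frames.
 */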
static int xgene_enet_refill_pagepool(struct xgene_enet_desc_ring *buf_pool,
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	if (unlikely(!buf_pool))

	ndev = buf_pool->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);
	slots = buf_pool->slots - 1;
	tail = buf_pool->tail;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];
		page = dev_alloc_page();
		dma_addr = dma_map_page(dev, page, 0,
					PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev, dma_addr))) {
		hw_len = xgene_enet_set_data_len(PAGE_SIZE);
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, hw_len) |
		buf_pool->frag_page[tail] = page;
		tail = (tail + 1) & slots;

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;
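/*
 * Allocate standard-MTU sk_buffs, map them for DMA and post them to the
 * buffer pool ring that backs normal receive traffic.
 */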
static int xgene_enet_refill_bufpool(struct xgene_enet_desc_ring *buf_pool,
	struct xgene_enet_raw_desc16 *raw_desc;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;
	u32 tail = buf_pool->tail;
	u32 slots = buf_pool->slots - 1;

	ndev = buf_pool->ndev;
	dev = ndev_to_dev(buf_pool->ndev);
	pdata = netdev_priv(ndev);

	bufdatalen = BUF_LEN_CODE_2K | (SKB_BUFFER_SIZE & GENMASK(11, 0));
	len = XGENE_ENET_STD_MTU;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &buf_pool->raw_desc16[tail];
		skb = netdev_alloc_skb_ip_align(ndev, len);
		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);

		buf_pool->rx_skb[tail] = skb;
		raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
					   SET_VAL(BUFDATALEN, bufdatalen) |
		tail = (tail + 1) & slots;

	pdata->ring_ops->wr_cmd(buf_pool, nbuf);
	buf_pool->tail = tail;
static u8 xgene_enet_hdr_len(const void *data)
	const struct ethhdr *eth = data;

	return (eth->h_proto == htons(ETH_P_8021Q)) ? VLAN_ETH_HLEN : ETH_HLEN;

static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
	struct device *dev = ndev_to_dev(buf_pool->ndev);
	struct xgene_enet_raw_desc16 *raw_desc;

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		if (buf_pool->rx_skb[i]) {
			dev_kfree_skb_any(buf_pool->rx_skb[i]);
			raw_desc = &buf_pool->raw_desc16[i];
			dma_addr = GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1));
			dma_unmap_single(dev, dma_addr, XGENE_ENET_MAX_MTU,

static void xgene_enet_delete_pagepool(struct xgene_enet_desc_ring *buf_pool)
	struct device *dev = ndev_to_dev(buf_pool->ndev);

	/* Free up the buffers held by hardware */
	for (i = 0; i < buf_pool->slots; i++) {
		page = buf_pool->frag_page[i];
		dma_addr = buf_pool->frag_dma_addr[i];
		dma_unmap_page(dev, dma_addr, PAGE_SIZE,

static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
	struct xgene_enet_desc_ring *rx_ring = data;

	if (napi_schedule_prep(&rx_ring->napi)) {
		disable_irq_nosync(irq);
		__napi_schedule(&rx_ring->napi);
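/*
 * Reclaim a transmitted skb: unmap the head and fragment buffers, drop
 * the reference on any hardware MSS slot used for TSO, and account
 * errors reported in the LERR field before freeing the skb.
 */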
static int xgene_enet_tx_completion(struct xgene_enet_desc_ring *cp_ring,
				    struct xgene_enet_raw_desc *raw_desc)
	struct xgene_enet_pdata *pdata = netdev_priv(cp_ring->ndev);
	dma_addr_t *frag_dma_addr;

	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = cp_ring->cp_skb[skb_index];
	frag_dma_addr = &cp_ring->frag_dma_addr[skb_index * MAX_SKB_FRAGS];

	dev = ndev_to_dev(cp_ring->ndev);
	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		dma_unmap_page(dev, frag_dma_addr[i], skb_frag_size(frag),

	if (GET_BIT(ET, le64_to_cpu(raw_desc->m3))) {
		mss_index = GET_VAL(MSS, le64_to_cpu(raw_desc->m3));
		spin_lock(&pdata->mss_lock);
		pdata->mss_refcnt[mss_index]--;
		spin_unlock(&pdata->mss_lock);

	/* Checking for error */
	status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status > 2)) {
		cp_ring->tx_dropped++;
		cp_ring->tx_errors++;

	dev_kfree_skb_any(skb);

	netdev_err(cp_ring->ndev, "completion skb is NULL\n");
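/*
 * Find a hardware MSS register for TSO: reuse a slot whose value already
 * matches, otherwise claim a slot with a zero refcount and program it.
 * mss_index starts at -EBUSY so that value is reported when every slot
 * is in use.
 */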
static int xgene_enet_setup_mss(struct net_device *ndev, u32 mss)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	int mss_index = -EBUSY;

	spin_lock(&pdata->mss_lock);

	/* Reuse the slot if MSS matches */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (pdata->mss[i] == mss) {
			pdata->mss_refcnt[i]++;

	/* Overwrite the slot with ref_count = 0 */
	for (i = 0; mss_index < 0 && i < NUM_MSS_REG; i++) {
		if (!pdata->mss_refcnt[i]) {
			pdata->mss_refcnt[i]++;
			pdata->mac_ops->set_mss(pdata, mss, i);

	spin_unlock(&pdata->mss_lock);
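/*
 * Build the hardware "work message" (hopinfo) for a transmit skb:
 * header lengths, checksum enable and, for TCP with TSO, the MSS slot
 * index obtained from xgene_enet_setup_mss().
 */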
static int xgene_enet_work_msg(struct sk_buff *skb, u64 *hopinfo)
	struct net_device *ndev = skb->dev;
	u8 l3hlen = 0, l4hlen = 0;
	u8 ethhdr, proto = 0, csum_enable = 0;
	u32 hdr_len, mss = 0;
	u32 i, len, nr_frags;

	ethhdr = xgene_enet_hdr_len(skb->data);

	if (unlikely(skb->protocol != htons(ETH_P_IP)) &&
	    unlikely(skb->protocol != htons(ETH_P_8021Q)))

	if (unlikely(!(skb->dev->features & NETIF_F_IP_CSUM)))

	if (unlikely(ip_is_fragment(iph)))

	if (likely(iph->protocol == IPPROTO_TCP)) {
		l4hlen = tcp_hdrlen(skb) >> 2;
		proto = TSO_IPPROTO_TCP;
		if (ndev->features & NETIF_F_TSO) {
			hdr_len = ethhdr + ip_hdrlen(skb) + tcp_hdrlen(skb);
			mss = skb_shinfo(skb)->gso_size;

			if (skb_is_nonlinear(skb)) {
				len = skb_headlen(skb);
				nr_frags = skb_shinfo(skb)->nr_frags;

				for (i = 0; i < 2 && i < nr_frags; i++)
					len += skb_frag_size(
						&skb_shinfo(skb)->frags[i]);

				/* HW requires the header to reside within 3 buffers */
				if (unlikely(hdr_len > len)) {
					if (skb_linearize(skb))

			if (!mss || ((skb->len - hdr_len) <= mss))

			mss_index = xgene_enet_setup_mss(ndev, mss);
			if (unlikely(mss_index < 0))

			*hopinfo |= SET_BIT(ET) | SET_VAL(MSS, mss_index);
	} else if (iph->protocol == IPPROTO_UDP) {
		l4hlen = UDP_HDR_SIZE;

	l3hlen = ip_hdrlen(skb) >> 2;
	*hopinfo |= SET_VAL(TCPHDR, l4hlen) |
		    SET_VAL(IPHDR, l3hlen) |
		    SET_VAL(ETHHDR, ethhdr) |
		    SET_VAL(EC, csum_enable) |
		    SET_BIT(TYPE_ETH_WORK_MESSAGE);
static u16 xgene_enet_encode_len(u16 len)
	return (len == BUFLEN_16K) ? 0 : len;

static void xgene_set_addr_len(__le64 *desc, u32 idx, dma_addr_t addr, u32 len)
	desc[idx ^ 1] = cpu_to_le64(SET_VAL(DATAADDR, addr) |
				    SET_VAL(BUFDATALEN, len));

static __le64 *xgene_enet_get_exp_bufs(struct xgene_enet_desc_ring *ring)
	exp_bufs = &ring->exp_bufs[ring->exp_buf_tail * MAX_EXP_BUFFS];
	memset(exp_bufs, 0, sizeof(__le64) * MAX_EXP_BUFFS);
	ring->exp_buf_tail = (ring->exp_buf_tail + 1) & ((ring->slots / 2) - 1);

static dma_addr_t *xgene_get_frag_dma_array(struct xgene_enet_desc_ring *ring)
	return &ring->cp_ring->frag_dma_addr[ring->tail * MAX_SKB_FRAGS];
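/*
 * Fill transmit descriptors for an skb: map the linear data, then, for
 * non-linear skbs, spread the fragments across the extended descriptor
 * and the per-ring exp_bufs area, splitting any fragment larger than
 * 16KB into multiple buffers.
 */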
static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
	struct device *dev = ndev_to_dev(tx_ring->ndev);
	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
	u16 tail = tx_ring->tail;
	u8 ll = 0, nv = 0, idx = 0;
	u32 size, offset, ell_bytes = 0;
	u32 i, fidx, nr_frags, count = 1;

	raw_desc = &tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(raw_desc, 0, sizeof(struct xgene_enet_raw_desc));

	ret = xgene_enet_work_msg(skb, &hopinfo);

	raw_desc->m3 = cpu_to_le64(SET_VAL(HENQNUM, tx_ring->dst_ring_num) |

	len = skb_headlen(skb);
	hw_len = xgene_enet_encode_len(len);

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		netdev_err(tx_ring->ndev, "DMA mapping error\n");

	/* Hardware expects descriptor in little endian format */
	raw_desc->m1 = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				   SET_VAL(BUFDATALEN, hw_len) |

	if (!skb_is_nonlinear(skb))

	exp_desc = (void *)&tx_ring->raw_desc[tail];
	tail = (tail + 1) & (tx_ring->slots - 1);
	memset(exp_desc, 0, sizeof(struct xgene_enet_raw_desc));

	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = nr_frags; i < 4; i++)
		exp_desc[i ^ 1] = cpu_to_le64(LAST_BUFFER);

	frag_dma_addr = xgene_get_frag_dma_array(tx_ring);

	for (i = 0, fidx = 0; split || (fidx < nr_frags); i++) {
		frag = &skb_shinfo(skb)->frags[fidx];
		size = skb_frag_size(frag);
		pbuf_addr = skb_frag_dma_map(dev, frag, 0, size,
		if (dma_mapping_error(dev, pbuf_addr))

		frag_dma_addr[fidx] = pbuf_addr;

		if (size > BUFLEN_16K)

		if (size > BUFLEN_16K) {

		dma_addr = pbuf_addr + offset;
		hw_len = xgene_enet_encode_len(len);

			xgene_set_addr_len(exp_desc, i, dma_addr, hw_len);

			if (split || (fidx != nr_frags)) {
				exp_bufs = xgene_enet_get_exp_bufs(tx_ring);
				xgene_set_addr_len(exp_bufs, idx, dma_addr,
				xgene_set_addr_len(exp_desc, i, dma_addr,
			xgene_set_addr_len(exp_bufs, idx, dma_addr, hw_len);

			offset += BUFLEN_16K;

	dma_addr = dma_map_single(dev, exp_bufs,
				  sizeof(u64) * MAX_EXP_BUFFS,
	if (dma_mapping_error(dev, dma_addr)) {
		dev_kfree_skb_any(skb);

	i = ell_bytes >> LL_BYTES_LSB_LEN;
	exp_desc[2] = cpu_to_le64(SET_VAL(DATAADDR, dma_addr) |
				  SET_VAL(LL_BYTES_MSB, i) |
				  SET_VAL(LL_LEN, idx));
	raw_desc->m2 = cpu_to_le64(SET_VAL(LL_BYTES_LSB, ell_bytes));

	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
	pdata->tx_level[tx_ring->cp_ring->index] += count;
	tx_ring->tail = tail;
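/*
 * Queue an skb for transmission, backing off with NETDEV_TX_BUSY when
 * the ring fill level (tx_level vs. txc_level) exceeds tx_qcnt_hi.
 */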
static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring;
	int index = skb->queue_mapping;
	u32 tx_level = pdata->tx_level[index];

	tx_ring = pdata->tx_ring[index];
	if (tx_level < pdata->txc_level[index])
		tx_level += ((typeof(pdata->tx_level[index]))~0U);

	if ((tx_level - pdata->txc_level[index]) > pdata->tx_qcnt_hi) {
		netif_stop_subqueue(ndev, index);
		return NETDEV_TX_BUSY;

	if (skb_padto(skb, XGENE_MIN_ENET_FRAME_SIZE))

	count = xgene_enet_setup_tx_desc(tx_ring, skb);
		return NETDEV_TX_BUSY;

		dev_kfree_skb_any(skb);

	skb_tx_timestamp(skb);

	tx_ring->tx_packets++;
	tx_ring->tx_bytes += skb->len;

	pdata->ring_ops->wr_cmd(tx_ring, count);
static void xgene_enet_rx_csum(struct sk_buff *skb)
	struct net_device *ndev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);

	if (!(ndev->features & NETIF_F_RXCSUM))

	if (skb->protocol != htons(ETH_P_IP))

	if (ip_is_fragment(iph))

	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)

	skb->ip_summed = CHECKSUM_UNNECESSARY;

static void xgene_enet_free_pagepool(struct xgene_enet_desc_ring *buf_pool,
				     struct xgene_enet_raw_desc *raw_desc,
				     struct xgene_enet_raw_desc *exp_desc)
	__le64 *desc = (void *)exp_desc;

	if (!buf_pool || !raw_desc || !exp_desc ||
	    (!GET_VAL(NV, le64_to_cpu(raw_desc->m0))))

	dev = ndev_to_dev(buf_pool->ndev);
	slots = buf_pool->slots - 1;
	head = buf_pool->head;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = buf_pool->frag_page[head];

		buf_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;

	buf_pool->head = head;

/* Errata 10GE_10 and ENET_15 - Fix duplicated HW statistic counters */
static bool xgene_enet_errata_10GE_10(struct sk_buff *skb, u32 len, u8 status)
	if (status == INGRESS_CRC &&
	    len >= (ETHER_STD_PACKET + 1) &&
	    len <= (ETHER_STD_PACKET + 4) &&
	    skb->protocol == htons(ETH_P_8021Q))

/* Errata 10GE_8 and ENET_11 - allow packet with length <= 64B */
static bool xgene_enet_errata_10GE_8(struct sk_buff *skb, u32 len, u8 status)
	if (status == INGRESS_PKT_LEN && len == ETHER_MIN_PACKET) {
		if (ntohs(eth_hdr(skb)->h_proto) < 46)
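/*
 * Receive one frame: unmap and hand off the head buffer, apply the
 * 10GE_8/10GE_10 errata checks on errored frames, attach page pool
 * fragments for multi-buffer (jumbo) frames and pass the skb to GRO.
 */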
static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
			       struct xgene_enet_raw_desc *raw_desc,
			       struct xgene_enet_raw_desc *exp_desc)
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	u32 datalen, frag_size, skb_index;
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	ndev = rx_ring->ndev;
	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(rx_ring->ndev);
	buf_pool = rx_ring->buf_pool;
	page_pool = rx_ring->page_pool;

	dma_unmap_single(dev, GET_VAL(DATAADDR, le64_to_cpu(raw_desc->m1)),
			 XGENE_ENET_STD_MTU, DMA_FROM_DEVICE);
	skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
	skb = buf_pool->rx_skb[skb_index];
	buf_pool->rx_skb[skb_index] = NULL;

	datalen = xgene_enet_get_data_len(le64_to_cpu(raw_desc->m1));
	skb_put(skb, datalen);
	prefetch(skb->data - NET_IP_ALIGN);
	skb->protocol = eth_type_trans(skb, ndev);

	/* checking for error */
	status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) |
		  GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
	if (unlikely(status)) {
		if (xgene_enet_errata_10GE_8(skb, datalen, status)) {
		} else if (xgene_enet_errata_10GE_10(skb, datalen, status)) {
		dev_kfree_skb_any(skb);
		xgene_enet_free_pagepool(page_pool, raw_desc, exp_desc);
		xgene_enet_parse_error(rx_ring, status);
		rx_ring->rx_dropped++;

	nv = GET_VAL(NV, le64_to_cpu(raw_desc->m0));

	/* strip off CRC as HW isn't doing this */

	slots = page_pool->slots - 1;
	head = page_pool->head;
	desc = (void *)exp_desc;

	for (i = 0; i < 4; i++) {
		frag_size = xgene_enet_get_data_len(le64_to_cpu(desc[i ^ 1]));
		dma_addr = GET_VAL(DATAADDR, le64_to_cpu(desc[i ^ 1]));
		dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

		page = page_pool->frag_page[head];
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
				frag_size, PAGE_SIZE);

		datalen += frag_size;

		page_pool->frag_page[head] = NULL;
		head = (head + 1) & slots;

	page_pool->head = head;
	rx_ring->npagepool -= skb_shinfo(skb)->nr_frags;

	skb_checksum_none_assert(skb);
	xgene_enet_rx_csum(skb);

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += datalen;
	napi_gro_receive(&rx_ring->napi, skb);

	if (rx_ring->npagepool <= 0) {
		ret = xgene_enet_refill_pagepool(page_pool, NUM_NXTBUFPOOL);
		rx_ring->npagepool = NUM_NXTBUFPOOL;

	if (--rx_ring->nbufpool == 0) {
		ret = xgene_enet_refill_bufpool(buf_pool, NUM_BUFPOOL);
		rx_ring->nbufpool = NUM_BUFPOOL;

static bool is_rx_desc(struct xgene_enet_raw_desc *raw_desc)
	return GET_VAL(FPQNUM, le64_to_cpu(raw_desc->m0)) ? true : false;
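/*
 * NAPI worker: walk the ring until the budget is spent or an empty slot
 * is found, dispatching each descriptor either to the RX path or to TX
 * completion, and re-arm the ring by writing back the consumed count.
 */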
static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
	struct net_device *ndev = ring->ndev;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
	int ret, desc_count, count = 0, processed = 0;

	raw_desc = &ring->raw_desc[head];
	is_completion = false;
	if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))

	/* read fpqnum field after dataaddr field */
	if (GET_BIT(NV, le64_to_cpu(raw_desc->m0))) {
		head = (head + 1) & slots;
		exp_desc = &ring->raw_desc[head];
		if (unlikely(xgene_enet_is_desc_slot_empty(exp_desc))) {
			head = (head - 1) & slots;

	if (is_rx_desc(raw_desc)) {
		ret = xgene_enet_rx_frame(ring, raw_desc, exp_desc);
		ret = xgene_enet_tx_completion(ring, raw_desc);
		is_completion = true;

	xgene_enet_mark_desc_slot_empty(raw_desc);
		xgene_enet_mark_desc_slot_empty(exp_desc);
	head = (head + 1) & slots;

	pdata->txc_level[ring->index] += desc_count;

	pdata->ring_ops->wr_cmd(ring, -count);

	if (__netif_subqueue_stopped(ndev, ring->index))
		netif_start_subqueue(ndev, ring->index);

static int xgene_enet_napi(struct napi_struct *napi, const int budget)
	struct xgene_enet_desc_ring *ring;

	ring = container_of(napi, struct xgene_enet_desc_ring, napi);
	processed = xgene_enet_process_ring(ring, budget);

	if (processed != budget) {
		napi_complete_done(napi, processed);
		enable_irq(ring->irq);
static void xgene_enet_timeout(struct net_device *ndev, unsigned int txqueue)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct netdev_queue *txq;

	pdata->mac_ops->reset(pdata);

	for (i = 0; i < pdata->txq_cnt; i++) {
		txq = netdev_get_tx_queue(ndev, i);
		txq->trans_start = jiffies;
		netif_tx_start_queue(txq);

static void xgene_enet_set_irq_name(struct net_device *ndev)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (!pdata->cq_cnt) {
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
			snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
static int xgene_enet_register_irq(struct net_device *ndev)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;

	xgene_enet_set_irq_name(ndev);
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
			netdev_err(ndev, "Failed to request irq %s\n",

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
				       0, ring->irq_name, ring);
			netdev_err(ndev, "Failed to request irq %s\n",

static void xgene_enet_free_irq(struct net_device *ndev)
	struct xgene_enet_pdata *pdata;
	struct xgene_enet_desc_ring *ring;

	pdata = netdev_priv(ndev);
	dev = ndev_to_dev(ndev);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);

	for (i = 0; i < pdata->cq_cnt; i++) {
		ring = pdata->tx_ring[i]->cp_ring;
		irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
		devm_free_irq(dev, ring->irq, ring);
static void xgene_enet_napi_enable(struct xgene_enet_pdata *pdata)
	struct napi_struct *napi;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;

static void xgene_enet_napi_disable(struct xgene_enet_pdata *pdata)
	struct napi_struct *napi;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
static int xgene_enet_open(struct net_device *ndev)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	ret = netif_set_real_num_tx_queues(ndev, pdata->txq_cnt);

	ret = netif_set_real_num_rx_queues(ndev, pdata->rxq_cnt);

	xgene_enet_napi_enable(pdata);
	ret = xgene_enet_register_irq(ndev);

		phy_start(ndev->phydev);

		schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
		netif_carrier_off(ndev);

	mac_ops->tx_enable(pdata);
	mac_ops->rx_enable(pdata);
	netif_tx_start_all_queues(ndev);

static int xgene_enet_close(struct net_device *ndev)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;

	netif_tx_stop_all_queues(ndev);
	mac_ops->tx_disable(pdata);
	mac_ops->rx_disable(pdata);

		phy_stop(ndev->phydev);

		cancel_delayed_work_sync(&pdata->link_work);

	xgene_enet_free_irq(ndev);
	xgene_enet_napi_disable(pdata);
	for (i = 0; i < pdata->rxq_cnt; i++)
		xgene_enet_process_ring(pdata->rx_ring[i], -1);
static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
	struct xgene_enet_pdata *pdata;

	pdata = netdev_priv(ring->ndev);
	dev = ndev_to_dev(ring->ndev);

	pdata->ring_ops->clear(ring);
	dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);

static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
	struct xgene_enet_desc_ring *buf_pool, *page_pool;
	struct xgene_enet_desc_ring *ring;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		xgene_enet_delete_ring(ring);
		pdata->port_ops->clear(pdata, ring);
		xgene_enet_delete_ring(ring->cp_ring);
		pdata->tx_ring[i] = NULL;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		page_pool = ring->page_pool;
		xgene_enet_delete_pagepool(page_pool);
		xgene_enet_delete_ring(page_pool);
		pdata->port_ops->clear(pdata, page_pool);

		buf_pool = ring->buf_pool;
		xgene_enet_delete_bufpool(buf_pool);
		xgene_enet_delete_ring(buf_pool);
		pdata->port_ops->clear(pdata, buf_pool);

		xgene_enet_delete_ring(ring);
		pdata->rx_ring[i] = NULL;
static int xgene_enet_get_ring_size(struct device *dev,
				    enum xgene_enet_ring_cfgsize cfgsize)
	case RING_CFGSIZE_512B:
	case RING_CFGSIZE_2KB:
	case RING_CFGSIZE_16KB:
	case RING_CFGSIZE_64KB:
	case RING_CFGSIZE_512KB:
		dev_err(dev, "Unsupported cfg ring size %d\n", cfgsize);

static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
	struct xgene_enet_pdata *pdata;

	dev = ndev_to_dev(ring->ndev);
	pdata = netdev_priv(ring->ndev);

	if (ring->desc_addr) {
		pdata->ring_ops->clear(ring);
		dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);

	devm_kfree(dev, ring);
static void xgene_enet_free_desc_rings(struct xgene_enet_pdata *pdata)
	struct xgene_enet_desc_ring *page_pool;
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		if (ring->cp_ring && ring->cp_ring->cp_skb)
			devm_kfree(dev, ring->cp_ring->cp_skb);

		if (ring->cp_ring && pdata->cq_cnt)
			xgene_enet_free_desc_ring(ring->cp_ring);

		xgene_enet_free_desc_ring(ring);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		if (ring->buf_pool) {
			if (ring->buf_pool->rx_skb)
				devm_kfree(dev, ring->buf_pool->rx_skb);

			xgene_enet_free_desc_ring(ring->buf_pool);

		page_pool = ring->page_pool;
		p = page_pool->frag_page;

		p = page_pool->frag_dma_addr;

		xgene_enet_free_desc_ring(ring);
static bool is_irq_mbox_required(struct xgene_enet_pdata *pdata,
				 struct xgene_enet_desc_ring *ring)
	if ((pdata->enet_id == XGENE_ENET2) &&
	    (xgene_enet_ring_owner(ring->id) == RING_OWNER_CPU)) {

static void __iomem *xgene_enet_ring_cmd_base(struct xgene_enet_pdata *pdata,
					      struct xgene_enet_desc_ring *ring)
	u8 num_ring_id_shift = pdata->ring_ops->num_ring_id_shift;

	return pdata->ring_cmd_addr + (ring->num << num_ring_id_shift);

static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
			struct net_device *ndev, u32 ring_num,
			enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device *dev = ndev_to_dev(ndev);
	struct xgene_enet_desc_ring *ring;
	void *irq_mbox_addr;

	size = xgene_enet_get_ring_size(dev, cfgsize);

	ring = devm_kzalloc(dev, sizeof(struct xgene_enet_desc_ring),

	ring->num = ring_num;
	ring->cfgsize = cfgsize;

	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
					      GFP_KERNEL | __GFP_ZERO);
	if (!ring->desc_addr) {
		devm_kfree(dev, ring);

	if (is_irq_mbox_required(pdata, ring)) {
		irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
						    &ring->irq_mbox_dma,
						    GFP_KERNEL | __GFP_ZERO);
		if (!irq_mbox_addr) {
			dmam_free_coherent(dev, size, ring->desc_addr,
			devm_kfree(dev, ring);

		ring->irq_mbox_addr = irq_mbox_addr;

	ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
	ring->cmd = ring->cmd_base + INC_DEC_CMD_ADDR;
	ring = pdata->ring_ops->setup(ring);
	netdev_dbg(ndev, "ring info: num=%d size=%d id=%d slots=%d\n",
		   ring->num, ring->size, ring->id, ring->slots);
static u16 xgene_enet_get_ring_id(enum xgene_ring_owner owner, u8 bufnum)
	return (owner << 6) | (bufnum & GENMASK(5, 0));

static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
	enum xgene_ring_owner owner;

	if (p->enet_id == XGENE_ENET1) {
		switch (p->phy_mode) {
		case PHY_INTERFACE_MODE_SGMII:
			owner = RING_OWNER_ETH0;
			owner = (!p->port_id) ? RING_OWNER_ETH0 :
		owner = (!p->port_id) ? RING_OWNER_ETH0 : RING_OWNER_ETH1;

static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
	struct device *dev = &pdata->pdev->dev;

	ret = device_property_read_u32(dev, "channel", &cpu_bufnum);

	return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
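/*
 * Create all descriptor rings: per-queue RX rings with their buffer and
 * (optionally) page pools, TX rings with their exp_bufs area, and the
 * TX completion rings, then derive tx_qcnt_hi from the ring size.
 */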
static int xgene_enet_create_desc_rings(struct net_device *ndev)
	struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *page_pool = NULL;
	struct xgene_enet_desc_ring *buf_pool = NULL;
	struct device *dev = ndev_to_dev(ndev);
	u8 eth_bufnum = pdata->eth_bufnum;
	u8 bp_bufnum = pdata->bp_bufnum;
	u16 ring_num = pdata->ring_num;
	enum xgene_ring_owner owner;
	dma_addr_t dma_exp_bufs;

	cpu_bufnum = xgene_start_cpu_bufnum(pdata);

	for (i = 0; i < pdata->rxq_cnt; i++) {
		/* allocate rx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU, cpu_bufnum++);
		rx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,

		/* allocate buffer pool for receiving packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		buf_pool = xgene_enet_create_desc_ring(ndev, ring_num++,

		rx_ring->nbufpool = NUM_BUFPOOL;
		rx_ring->npagepool = NUM_NXTBUFPOOL;
		rx_ring->irq = pdata->irqs[i];
		buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
						sizeof(struct sk_buff *),
		if (!buf_pool->rx_skb) {

		buf_pool->dst_ring_num = xgene_enet_dst_ring_num(buf_pool);
		rx_ring->buf_pool = buf_pool;
		pdata->rx_ring[i] = rx_ring;

		if ((pdata->enet_id == XGENE_ENET1 && pdata->rxq_cnt > 4) ||
		    (pdata->enet_id == XGENE_ENET2 && pdata->rxq_cnt > 16)) {

		/* allocate next buffer pool for jumbo packets */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, bp_bufnum++);
		page_pool = xgene_enet_create_desc_ring(ndev, ring_num++,

		slots = page_pool->slots;
		page_pool->frag_page = devm_kcalloc(dev, slots,
						    sizeof(struct page *),
		if (!page_pool->frag_page) {

		page_pool->frag_dma_addr = devm_kcalloc(dev, slots,
		if (!page_pool->frag_dma_addr) {

		page_pool->dst_ring_num = xgene_enet_dst_ring_num(page_pool);
		rx_ring->page_pool = page_pool;

	for (i = 0; i < pdata->txq_cnt; i++) {
		/* allocate tx descriptor ring */
		owner = xgene_derive_ring_owner(pdata);
		ring_id = xgene_enet_get_ring_id(owner, eth_bufnum++);
		tx_ring = xgene_enet_create_desc_ring(ndev, ring_num++,

		size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
		exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
					       GFP_KERNEL | __GFP_ZERO);

		tx_ring->exp_bufs = exp_bufs;

		pdata->tx_ring[i] = tx_ring;

		if (!pdata->cq_cnt) {
			cp_ring = pdata->rx_ring[i];

			/* allocate tx completion descriptor ring */
			ring_id = xgene_enet_get_ring_id(RING_OWNER_CPU,
			cp_ring = xgene_enet_create_desc_ring(ndev, ring_num++,

			cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];

		cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
					       sizeof(struct sk_buff *),
		if (!cp_ring->cp_skb) {

		size = sizeof(dma_addr_t) * MAX_SKB_FRAGS;
		cp_ring->frag_dma_addr = devm_kcalloc(dev, tx_ring->slots,
		if (!cp_ring->frag_dma_addr) {
			devm_kfree(dev, cp_ring->cp_skb);

		tx_ring->cp_ring = cp_ring;
		tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

	if (pdata->ring_ops->coalesce)
		pdata->ring_ops->coalesce(pdata->tx_ring[0]);
	pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128;

	xgene_enet_free_desc_rings(pdata);
static void xgene_enet_get_stats64(
			struct net_device *ndev,
			struct rtnl_link_stats64 *stats)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *ring;

	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];
		stats->tx_packets += ring->tx_packets;
		stats->tx_bytes += ring->tx_bytes;
		stats->tx_dropped += ring->tx_dropped;
		stats->tx_errors += ring->tx_errors;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i];
		stats->rx_packets += ring->rx_packets;
		stats->rx_bytes += ring->rx_bytes;
		stats->rx_dropped += ring->rx_dropped;
		stats->rx_errors += ring->rx_errors +
			ring->rx_length_errors +
			ring->rx_crc_errors +
			ring->rx_frame_errors +
			ring->rx_fifo_errors;
		stats->rx_length_errors += ring->rx_length_errors;
		stats->rx_crc_errors += ring->rx_crc_errors;
		stats->rx_frame_errors += ring->rx_frame_errors;
		stats->rx_fifo_errors += ring->rx_fifo_errors;

static int xgene_enet_set_mac_address(struct net_device *ndev, void *addr)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	ret = eth_mac_addr(ndev, addr);

	pdata->mac_ops->set_mac_addr(pdata);

static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);

	if (!netif_running(ndev))

	frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;

	xgene_enet_close(ndev);
	ndev->mtu = new_mtu;
	pdata->mac_ops->set_framesize(pdata, frame_size);
	xgene_enet_open(ndev);
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xgene_enet_open,
	.ndo_stop = xgene_enet_close,
	.ndo_start_xmit = xgene_enet_start_xmit,
	.ndo_tx_timeout = xgene_enet_timeout,
	.ndo_get_stats64 = xgene_enet_get_stats64,
	.ndo_change_mtu = xgene_change_mtu,
	.ndo_set_mac_address = xgene_enet_set_mac_address,
static void xgene_get_port_id_acpi(struct device *dev,
				   struct xgene_enet_pdata *pdata)
	status = acpi_evaluate_integer(ACPI_HANDLE(dev), "_SUN", NULL, &temp);
	if (ACPI_FAILURE(status)) {

	pdata->port_id = temp;

static void xgene_get_port_id_dt(struct device *dev, struct xgene_enet_pdata *pdata)
	of_property_read_u32(dev->of_node, "port-id", &id);

	pdata->port_id = id & BIT(0);

static int xgene_get_tx_delay(struct xgene_enet_pdata *pdata)
	struct device *dev = &pdata->pdev->dev;

	ret = device_property_read_u32(dev, "tx-delay", &delay);
		pdata->tx_delay = 4;

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid tx-delay specified\n");

	pdata->tx_delay = delay;

static int xgene_get_rx_delay(struct xgene_enet_pdata *pdata)
	struct device *dev = &pdata->pdev->dev;

	ret = device_property_read_u32(dev, "rx-delay", &delay);
		pdata->rx_delay = 2;

	if (delay < 0 || delay > 7) {
		dev_err(dev, "Invalid rx-delay specified\n");

	pdata->rx_delay = delay;

static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
	struct platform_device *pdev = pdata->pdev;
	int i, ret, max_irqs;

	if (phy_interface_mode_is_rgmii(pdata->phy_mode))
	else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII)
		max_irqs = XGENE_MAX_ENET_IRQ;

	for (i = 0; i < max_irqs; i++) {
		ret = platform_get_irq(pdev, i);
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->rxq_cnt = max_irqs / 2;
				pdata->txq_cnt = max_irqs / 2;
				pdata->cq_cnt = max_irqs / 2;
			return ret ? : -ENXIO;

		pdata->irqs[i] = ret;
static void xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)

	if (!IS_ENABLED(CONFIG_MDIO_XGENE))

	ret = xgene_enet_phy_connect(pdata->ndev);
		pdata->mdio_driver = true;

static void xgene_enet_gpiod_get(struct xgene_enet_pdata *pdata)
	struct device *dev = &pdata->pdev->dev;

	pdata->sfp_gpio_en = false;
	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII ||
	    (!device_property_present(dev, "sfp-gpios") &&
	     !device_property_present(dev, "rxlos-gpios")))

	pdata->sfp_gpio_en = true;
	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
	if (IS_ERR(pdata->sfp_rdy))
		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
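/*
 * Gather platform resources: map the ENET/ring CSR and ring command
 * regions, read the port id, MAC address, PHY mode, delays, IRQs, GPIOs
 * and the optional clock, then compute the per-block register bases.
 */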
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
	struct platform_device *pdev;
	struct net_device *ndev;
	struct resource *res;
	void __iomem *base_addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_ENET_CSR);
		dev_err(dev, "Resource enet_csr not defined\n");

	pdata->base_addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!pdata->base_addr) {
		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CSR);
		dev_err(dev, "Resource ring_csr not defined\n");

	pdata->ring_csr_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_csr_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring CSR region\n");

	res = platform_get_resource(pdev, IORESOURCE_MEM, RES_RING_CMD);
		dev_err(dev, "Resource ring_cmd not defined\n");

	pdata->ring_cmd_addr = devm_ioremap(dev, res->start,
					    resource_size(res));
	if (!pdata->ring_cmd_addr) {
		dev_err(dev, "Unable to retrieve ENET Ring command region\n");

		xgene_get_port_id_dt(dev, pdata);
		xgene_get_port_id_acpi(dev, pdata);

	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
		eth_hw_addr_random(ndev);

	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);

	pdata->phy_mode = device_get_phy_mode(dev);
	if (pdata->phy_mode < 0) {
		dev_err(dev, "Unable to get phy-connection-type\n");
		return pdata->phy_mode;

	if (!phy_interface_mode_is_rgmii(pdata->phy_mode) &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
		dev_err(dev, "Incorrect phy-connection-type specified\n");

	ret = xgene_get_tx_delay(pdata);

	ret = xgene_get_rx_delay(pdata);

	ret = xgene_enet_get_irqs(pdata);

	xgene_enet_gpiod_get(pdata);

	pdata->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(pdata->clk)) {
		if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) {
			/* Abort if the clock is defined but couldn't be
			 * retrieved. Always abort if the clock is missing on
			 * a DT system, as the driver can't cope with this case.
			 */
			if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node)
				return PTR_ERR(pdata->clk);
			/* Firmware may have set up the clock already. */
			dev_info(dev, "clocks have been setup already\n");

	if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII)
		base_addr = pdata->base_addr - (pdata->port_id * MAC_OFFSET);
		base_addr = pdata->base_addr;
	pdata->eth_csr_addr = base_addr + BLOCK_ETH_CSR_OFFSET;
	pdata->cle.base = base_addr + BLOCK_ETH_CLE_CSR_OFFSET;
	pdata->eth_ring_if_addr = base_addr + BLOCK_ETH_RING_IF_OFFSET;
	pdata->eth_diag_csr_addr = base_addr + BLOCK_ETH_DIAG_CSR_OFFSET;
	if (phy_interface_mode_is_rgmii(pdata->phy_mode) ||
	    pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
		pdata->mcx_mac_addr = pdata->base_addr + BLOCK_ETH_MAC_OFFSET;
		pdata->mcx_stats_addr =
			pdata->base_addr + BLOCK_ETH_STATS_OFFSET;
		offset = (pdata->enet_id == XGENE_ENET1) ?
			BLOCK_ETH_MAC_CSR_OFFSET :
			X2_BLOCK_ETH_MAC_CSR_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + offset;
		pdata->mcx_mac_addr = base_addr + BLOCK_AXG_MAC_OFFSET;
		pdata->mcx_stats_addr = base_addr + BLOCK_AXG_STATS_OFFSET;
		pdata->mcx_mac_csr_addr = base_addr + BLOCK_AXG_MAC_CSR_OFFSET;
		pdata->pcs_addr = base_addr + BLOCK_PCS_OFFSET;

	pdata->rx_buff_cnt = NUM_PKT_BUF;
static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
	struct xgene_enet_cle *enet_cle = &pdata->cle;
	struct xgene_enet_desc_ring *page_pool;
	struct net_device *ndev = pdata->ndev;
	struct xgene_enet_desc_ring *buf_pool;
	u16 dst_ring_num, ring_id;

	ret = pdata->port_ops->reset(pdata);

	ret = xgene_enet_create_desc_rings(ndev);
		netdev_err(ndev, "Error in ring configuration\n");

	/* setup buffer pool */
	for (i = 0; i < pdata->rxq_cnt; i++) {
		buf_pool = pdata->rx_ring[i]->buf_pool;
		xgene_enet_init_bufpool(buf_pool);
		page_pool = pdata->rx_ring[i]->page_pool;
		xgene_enet_init_bufpool(page_pool);

		count = pdata->rx_buff_cnt;
		ret = xgene_enet_refill_bufpool(buf_pool, count);

		ret = xgene_enet_refill_pagepool(page_pool, count);

	dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
	buf_pool = pdata->rx_ring[0]->buf_pool;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		/* Initialize and Enable PreClassifier Tree */
		enet_cle->max_nodes = 512;
		enet_cle->max_dbptrs = 1024;
		enet_cle->parsers = 3;
		enet_cle->active_parser = PARSER_ALL;
		enet_cle->ptree.start_node = 0;
		enet_cle->ptree.start_dbptr = 0;
		enet_cle->jump_bytes = 8;
		ret = pdata->cle_ops->cle_init(pdata);
			netdev_err(ndev, "Preclass Tree init error\n");

		dst_ring_num = xgene_enet_dst_ring_num(pdata->rx_ring[0]);
		buf_pool = pdata->rx_ring[0]->buf_pool;
		page_pool = pdata->rx_ring[0]->page_pool;
		ring_id = (page_pool) ? page_pool->id : 0;
		pdata->port_ops->cle_bypass(pdata, dst_ring_num,
					    buf_pool->id, ring_id);

	ndev->max_mtu = XGENE_ENET_MAX_MTU;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->mac_ops->init(pdata);

	xgene_enet_delete_desc_rings(pdata);
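/*
 * Select the MAC, port, CLE and ring operations plus the buffer and
 * ring numbering for this port, based on phy_mode, enet_id and port_id.
 */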
static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
	switch (pdata->phy_mode) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		pdata->mac_ops = &xgene_gmac_ops;
		pdata->port_ops = &xgene_gport_ops;
	case PHY_INTERFACE_MODE_SGMII:
		pdata->mac_ops = &xgene_sgmac_ops;
		pdata->port_ops = &xgene_sgport_ops;
		pdata->mac_ops = &xgene_xgmac_ops;
		pdata->port_ops = &xgene_xgport_ops;
		pdata->cle_ops = &xgene_cle3in_ops;
		if (!pdata->rxq_cnt) {
			pdata->rxq_cnt = XGENE_NUM_RX_RING;
			pdata->txq_cnt = XGENE_NUM_TX_RING;
			pdata->cq_cnt = XGENE_NUM_TXC_RING;

	if (pdata->enet_id == XGENE_ENET1) {
		switch (pdata->port_id) {
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
				pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
				pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
				pdata->cpu_bufnum = START_CPU_BUFNUM_0;
				pdata->eth_bufnum = START_ETH_BUFNUM_0;
				pdata->bp_bufnum = START_BP_BUFNUM_0;
				pdata->ring_num = START_RING_NUM_0;
			if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
				pdata->cpu_bufnum = XG_START_CPU_BUFNUM_1;
				pdata->eth_bufnum = XG_START_ETH_BUFNUM_1;
				pdata->bp_bufnum = XG_START_BP_BUFNUM_1;
				pdata->ring_num = XG_START_RING_NUM_1;
				pdata->cpu_bufnum = START_CPU_BUFNUM_1;
				pdata->eth_bufnum = START_ETH_BUFNUM_1;
				pdata->bp_bufnum = START_BP_BUFNUM_1;
				pdata->ring_num = START_RING_NUM_1;

		pdata->ring_ops = &xgene_ring1_ops;
		switch (pdata->port_id) {
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
			pdata->ring_num = X2_START_RING_NUM_0;
			pdata->cpu_bufnum = X2_START_CPU_BUFNUM_1;
			pdata->eth_bufnum = X2_START_ETH_BUFNUM_1;
			pdata->bp_bufnum = X2_START_BP_BUFNUM_1;
			pdata->ring_num = X2_START_RING_NUM_1;

		pdata->ring_ops = &xgene_ring2_ops;
static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
	struct napi_struct *napi;

	for (i = 0; i < pdata->rxq_cnt; i++) {
		napi = &pdata->rx_ring[i]->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,

	for (i = 0; i < pdata->cq_cnt; i++) {
		napi = &pdata->tx_ring[i]->cp_ring->napi;
		netif_napi_add(pdata->ndev, napi, xgene_enet_napi,

static const struct acpi_device_id xgene_enet_acpi_match[] = {
	{ "APMC0D05", XGENE_ENET1 },
	{ "APMC0D30", XGENE_ENET1 },
	{ "APMC0D31", XGENE_ENET1 },
	{ "APMC0D3F", XGENE_ENET1 },
	{ "APMC0D26", XGENE_ENET2 },
	{ "APMC0D25", XGENE_ENET2 },
MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match);

static const struct of_device_id xgene_enet_of_match[] = {
	{.compatible = "apm,xgene-enet",    .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-sgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene1-xgenet", .data = (void *)XGENE_ENET1},
	{.compatible = "apm,xgene2-sgenet", .data = (void *)XGENE_ENET2},
	{.compatible = "apm,xgene2-xgenet", .data = (void *)XGENE_ENET2},
MODULE_DEVICE_TABLE(of, xgene_enet_of_match);
static int xgene_enet_probe(struct platform_device *pdev)
	struct net_device *ndev;
	struct xgene_enet_pdata *pdata;
	struct device *dev = &pdev->dev;
	void (*link_state)(struct work_struct *);
	const struct of_device_id *of_id;

	ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
				  XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);

	pdata = netdev_priv(ndev);

	SET_NETDEV_DEV(ndev, dev);
	platform_set_drvdata(pdev, pdata);
	ndev->netdev_ops = &xgene_ndev_ops;
	xgene_enet_set_ethtool_ops(ndev);
	ndev->features |= NETIF_F_IP_CSUM |

	of_id = of_match_device(xgene_enet_of_match, &pdev->dev);
		pdata->enet_id = (enum xgene_enet_id)of_id->data;

		const struct acpi_device_id *acpi_id;

		acpi_id = acpi_match_device(xgene_enet_acpi_match, &pdev->dev);
			pdata->enet_id = (enum xgene_enet_id)acpi_id->driver_data;

	if (!pdata->enet_id) {

	ret = xgene_enet_get_resources(pdata);

	xgene_enet_setup_ops(pdata);
	spin_lock_init(&pdata->mac_lock);

	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		ndev->features |= NETIF_F_TSO | NETIF_F_RXCSUM;
		spin_lock_init(&pdata->mss_lock);

	ndev->hw_features = ndev->features;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
		netdev_err(ndev, "No usable DMA configuration\n");

	xgene_enet_check_phy_handle(pdata);

	ret = xgene_enet_init_hw(pdata);

	link_state = pdata->mac_ops->link_state;
	if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
		INIT_DELAYED_WORK(&pdata->link_work, link_state);
	} else if (!pdata->mdio_driver) {
		if (phy_interface_mode_is_rgmii(pdata->phy_mode))
			ret = xgene_enet_mdio_config(pdata);
			INIT_DELAYED_WORK(&pdata->link_work, link_state);

	spin_lock_init(&pdata->stats_lock);
	ret = xgene_extd_stats_init(pdata);

	xgene_enet_napi_add(pdata);
	ret = register_netdev(ndev);
		netdev_err(ndev, "Failed to register netdev\n");

	 * If necessary, free_netdev() will call netif_napi_del() and undo
	 * the effects of xgene_enet_napi_add()'s calls to netif_napi_add().
	xgene_enet_delete_desc_rings(pdata);

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		xgene_enet_mdio_remove(pdata);
static int xgene_enet_remove(struct platform_device *pdev)
	struct xgene_enet_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);

	if (netif_running(ndev))

	if (pdata->mdio_driver)
		xgene_enet_phy_disconnect(pdata);
	else if (phy_interface_mode_is_rgmii(pdata->phy_mode))
		xgene_enet_mdio_remove(pdata);

	unregister_netdev(ndev);
	xgene_enet_delete_desc_rings(pdata);
	pdata->port_ops->shutdown(pdata);

static void xgene_enet_shutdown(struct platform_device *pdev)
	struct xgene_enet_pdata *pdata;

	pdata = platform_get_drvdata(pdev);

	xgene_enet_remove(pdev);

static struct platform_driver xgene_enet_driver = {
		.name = "xgene-enet",
		.of_match_table = of_match_ptr(xgene_enet_of_match),
		.acpi_match_table = ACPI_PTR(xgene_enet_acpi_match),
	.probe = xgene_enet_probe,
	.remove = xgene_enet_remove,
	.shutdown = xgene_enet_shutdown,
module_platform_driver(xgene_enet_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet driver");
MODULE_VERSION(XGENE_DRV_VERSION);
MODULE_AUTHOR("Iyappan Subramanian <isubramanian@apm.com>");
MODULE_AUTHOR("Keyur Chudgar <kchudgar@apm.com>");
MODULE_LICENSE("GPL");