// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
			    int send_sz, dma_addr_t dma, int frag_end,
			    int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)send_sz);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len -
						 skb_tcp_all_headers(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len -
						 skb_tcp_all_headers(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
			buf_num, type, mtu);
}
static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu,
		      bool is_gso)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset = 0;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/*config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/*if it is a SW VLAN check the next protocol*/
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has not l3 cs, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}
static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	skb_frag_t *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manual split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}
static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum,
				    struct hnae_ring *ring)
{
	if (skb_is_gso(*out_skb))
		return hns_nic_maybe_stop_tso(out_skb, bnum, ring);
	else
		return hns_nic_maybe_stop_tx(out_skb, bnum, ring);
}
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
				(k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
				dma + BD_MAX_SEND_SIZE * k,
				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				buf_num,
				(type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
				mtu);
}
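
/* Worked example for the split above (illustrative numbers, not from the
 * original source): a 20000-byte fragment with BD_MAX_SEND_SIZE = 8191
 * gives frag_buf_num = 3 and sizeoflast = 20000 % 8191 = 3618, so the
 * hardware sees three BDs of 8191, 8191 and 3618 bytes mapped at
 * dma + 0, dma + 8191 and dma + 16382, with the frag_end flag set only
 * on the last one.
 */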
static void fill_desc_v2(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu,
			 bool is_gso)
{
	if (is_gso)
		fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type,
			      mtu);
	else
		fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type,
			     mtu);
}
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
				struct sk_buff *skb,
				struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	skb_frag_t *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	bool is_gso;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	is_gso = skb_is_gso(skb);
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu, is_gso);
	}

	/*complete translate all packets*/
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	netif_trans_update(ndev);
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	u32 truesize;
	int size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given*/
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given*/
		get_page(desc_cb->priv);
	}
}
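
/* Illustrative note for the reuse path above (assumes 4 KiB pages): with
 * hnae_buf_size() == HNS_BUFFER_SIZE_2048 and PAGE_SIZE == 4096 each page
 * holds two RX buffers, so "page_offset ^= truesize" simply flips between
 * offset 0 and offset 2048 while the other half may still be in flight.
 */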
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}
static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP(over IPv4 or IPv6),
	 * 3) UDP(over IPv4 or IPv6),
	 * 4) SCTP(over IPv4 or IPv6)
	 * but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error
	 * encountered during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is a L3/L4 error due to bad
	 * checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to upper stack and will not maintain
	 * the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any(not just checksum)flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any(not just checksum)flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err*/
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check except process, free skb and jump the desc */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved,cannot 0*/
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}
static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data has been written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}
/* return error number for error or number of desc left to take
 */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	napi_gro_receive(&ring_data->napi, skb);
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
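
/* Worked example (illustrative numbers): with desc_num = 1024,
 * next_to_clean = 10 and next_to_use = 1000 the ring has wrapped, so the
 * result is 1024 + 10 - 1000 = 34 unused descriptors; with ntc = 500 and
 * ntu = 100 it is simply 500 - 100 = 400.
 */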
#define HNS_LOWEST_LATENCY_RATE		27	/* 27 MB/s */
#define HNS_LOW_LATENCY_RATE		80	/* 80 MB/s */

#define HNS_COAL_BDNUM			3

static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;

	if (coal_enable &&
	    ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
		return HNS_COAL_BDNUM;
	else
		return 0;
}
static void hns_update_rx_rate(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;
	u32 time_passed_ms;
	u64 total_bytes;

	if (!coal_enable ||
	    time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
		return;

	/* ring->stats.rx_bytes overflowed */
	if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
		ring->coal_last_rx_bytes = ring->stats.rx_bytes;
		ring->coal_last_jiffies = jiffies;
		return;
	}

	total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
	time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
	do_div(total_bytes, time_passed_ms);
	ring->coal_rx_rate = total_bytes >> 10;

	ring->coal_last_rx_bytes = ring->stats.rx_bytes;
	ring->coal_last_jiffies = jiffies;
}
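
/* Worked example (illustrative numbers): if about 8 MB arrived during the
 * last 100 ms, total_bytes / time_passed_ms is roughly 83886 bytes per ms,
 * and the ">> 10" scales that to ~81, i.e. roughly 80 MB/s, which is then
 * compared against the HNS_*_LATENCY_RATE thresholds.
 */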
/**
 * smooth_alg - smoothing algorithm for adjusting coalesce parameter
 * @new_param: new value
 * @old_param: old value
 **/
static u32 smooth_alg(u32 new_param, u32 old_param)
{
	u32 gap = (new_param > old_param) ? new_param - old_param
					  : old_param - new_param;

	if (new_param > old_param)
		return old_param + gap;
	else
		return old_param - gap;
}
/**
 * hns_nic_adpt_coalesce - self-adapt coalesce according to rx rate
 * @ring_data: pointer to hns_nic_ring_data
 **/
static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct hnae_handle *handle = ring->q->handle;
	u32 new_coal_param, old_coal_param = ring->coal_param;

	if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
		new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
	else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
		new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
	else
		new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;

	if (new_coal_param == old_coal_param &&
	    new_coal_param == handle->coal_param)
		return;

	new_coal_param = smooth_alg(new_coal_param, old_coal_param);
	ring->coal_param = new_coal_param;

	/**
	 * Because all rings in one port share one coalesce param, when one
	 * ring calculates its own coalesce param, it cannot write to hardware
	 * at once. There are three conditions as follows:
	 *       1. current ring's coalesce param is larger than the hardware.
	 *       2. or ring which adapt last time can change again.
	 *       3. timeout.
	 */
	if (new_coal_param == handle->coal_param) {
		handle->coal_last_jiffies = jiffies;
		handle->coal_ring_idx = ring_data->queue_index;
	} else if (new_coal_param > handle->coal_param ||
		   handle->coal_ring_idx == ring_data->queue_index ||
		   time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
		handle->dev->ops->set_coalesce_usecs(handle,
						     new_coal_param);
		handle->dev->ops->set_coalesce_frames(handle,
						      1, new_coal_param);
		handle->coal_param = new_coal_param;
		handle->coal_ring_idx = ring_data->queue_index;
		handle->coal_last_jiffies = jiffies;
	}
}
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) {  /* do jump the err */
			recv_pkts++;
			continue;
		}

		/* do update ip stack process*/
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}
static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;
	bool rx_stopped;

	hns_update_rx_rate(ring);

	/* for hardware bug fixed */
	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		rx_stopped = true;
	} else {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		rx_stopped = false;
	}

	return rx_stopped;
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	hns_update_rx_rate(ring);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		return true;
	}

	return false;
}
static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach*/
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before call this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
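
/* Worked example (illustrative numbers): with desc_num = 1024,
 * next_to_clean = 1000 and next_to_use = 20 (a wrapped ring), a hardware
 * head of 1010 or 5 passes the "(h > c || h <= u)" branch, while a head of
 * 500 is rejected and the caller counts an io_err_cnt.
 */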
/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean)
		return 0; /* no data to poll */

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}
	/* update tx ring statistics. */
	ring->stats.tx_pkts += pkts;
	ring->stats.tx_bytes += bytes;

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}
static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		return true;
	else
		return false;
}
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	head = ring->next_to_use; /* ntu :soft setted ring position*/
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

	clean_complete += ring_data->poll_one(
				ring_data, budget - clean_complete,
				ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			return budget;
		}
	}

	return clean_complete;
}
static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}
/**
 *hns_nic_adjust_link - adjust network mode by the phy stat or new param
 *@ndev: net device
 **/
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	/* If there is no phy, do not need adjust link */
	if (ndev->phydev) {
		/* When phy link down, do nothing */
		if (ndev->phydev->link == 0)
			return;

		if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
						  ndev->phydev->duplex)) {
			/* because Hi161X chip don't support to change gmac
			 * speed and duplex with traffic. Delay 200ms to
			 * make sure there is no more data in chip FIFO.
			 */
			netif_carrier_off(ndev);
			msleep(200);
			h->dev->ops->adjust_link(h, ndev->phydev->speed,
						 ndev->phydev->duplex);
			netif_carrier_on(ndev);
		}
	}

	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}
/**
 *hns_nic_init_phy - init phy
 *@ndev: net device
 *@h: ae handle
 * Return 0 on success, negative on failure
 **/
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
	linkmode_and(phy_dev->supported, phy_dev->supported, supported);
	linkmode_copy(phy_dev->advertising, phy_dev->supported);

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	phy_attached_info(phy_dev);

	return 0;
}
static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}
static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	eth_hw_addr_set(ndev, mac_addr->sa_data);

	return 0;
}

static void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}
/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (device_get_ethdev_address(priv->dev, ndev)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}
static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}
static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
				      struct hnae_ring *ring, cpumask_t *mask)
{
	int cpu;

	/* Different irq balance between 16core and 32core.
	 * The cpu mask is set by ring index according to the ring flag
	 * which indicates whether the ring is tx or rx.
	 */
	if (q_num == num_possible_cpus()) {
		if (is_tx_ring(ring))
			cpu = ring_idx;
		else
			cpu = ring_idx - q_num;
	} else {
		if (is_tx_ring(ring))
			cpu = ring_idx * 2;
		else
			cpu = (ring_idx - q_num) * 2 + 1;
	}

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	return cpu;
}
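
/* Illustrative mapping produced by the code above (assumed core counts):
 * with 16 queues on a 16-core system, TX ring 3 is pinned to CPU 3 and the
 * matching RX ring (ring_idx 19, i.e. 19 - q_num = 3) also to CPU 3; on a
 * 32-core system TX ring 3 goes to CPU 6 and its RX ring to CPU 7, so TX
 * and RX interrupts land on even/odd cores respectively.
 */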
static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
{
	int i;

	for (i = 0; i < q_num * 2; i++) {
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
					      NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
			priv->ring_data[i].ring->irq_init_flag =
				RCB_IRQ_NOT_INITED;
		}
	}
}
static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN);
		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			goto out_free_irq;
		}

		cpu = hns_nic_init_affinity_mask(h->q_num, i,
						 rd->ring, &rd->mask);

		if (cpu_online(cpu))
			irq_set_affinity_hint(rd->ring->irq,
					      &rd->mask);

		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	return 0;

out_free_irq:
	hns_nic_free_irq(h->q_num, priv);
	return ret;
}
static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	if (!test_bit(NIC_STATE_DOWN, &priv->state))
		return 0;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	hns_nic_free_irq(h->q_num, priv);
	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}
static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers*/
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}
void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	enum hnae_port_type type = priv->ae_handle->port_type;

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);

	/* Only do hns_nic_net_reset in debug mode
	 * because of hardware limitation.
	 */
	if (type == HNAE_PORT_DEBUG)
		hns_nic_net_reset(netdev);

	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}
static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}
static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
#define HNS_TX_TIMEO_LIMIT (40 * HZ)
static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
		ndev->watchdog_timeo *= 2;
		netdev_info(ndev, "watchdog_timo changed to %d.\n",
			    ndev->watchdog_timeo);
	} else {
		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
		hns_tx_timeout_reset(priv);
	}
}

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	assert(skb->queue_mapping < priv->ae_handle->q_num);

	return hns_nic_net_xmit_hw(ndev, skb,
				   &tx_ring_data(priv, skb->queue_mapping));
}

static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
				  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
#define HNS_LB_TX_RING	0
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	int frame_len;

	/* allocate test skb */
	skb = alloc_skb(64, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, 64);
	skb->dev = ndev;
	memset(skb->data, 0xFF, skb->len);

	/* must be tcp/ip package */
	ethhdr = (struct ethhdr *)skb->data;
	ethhdr->h_proto = htons(ETH_P_IP);

	frame_len = skb->len & (~1ul);
	memset(&skb->data[frame_len / 2], 0xAA,
	       frame_len / 2 - 1);

	skb->queue_mapping = HNS_LB_TX_RING;

	return skb;
}
static int hns_enable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	int speed, duplex;
	int ret;

	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
	if (ret)
		return ret;

	ret = ops->start ? ops->start(h) : 0;
	if (ret)
		return ret;

	/* link adjust duplex*/
	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		speed = 1000;
	else
		speed = 10000;
	duplex = 1;

	ops->adjust_link(h, speed, duplex);

	/* wait h/w ready */
	mdelay(300);

	return 0;
}

static void hns_disable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;

	ops->stop(h);
	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}
/**
 *hns_nic_clear_all_rx_fetch - clear the chip fetched descriptions. The
 *function works as follows:
 *    1. if one rx ring has found the page_offset is not equal 0 between head
 *       and tail, it means that the chip fetched the wrong descs for the ring
 *       which buffer size is 4096.
 *    2. we set the chip serdes loopback and set rss indirection to the ring.
 *    3. construct 64-bytes ip broadcast packages, wait the associated rx ring
 *       receiving all packages and it will fetch new descriptions.
 *    4. recover to the original state.
 *
 *@ndev: net device
 **/
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	struct hns_nic_ring_data *rd;
	struct hnae_ring *ring;
	struct sk_buff *skb;
	u32 *org_indir;
	u32 *cur_indir;
	int indir_size;
	int head, tail;
	int fetch_num;
	int i, j;
	bool found = false;
	int retry_times;
	int ret = 0;

	/* alloc indir memory */
	indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
	org_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!org_indir)
		return -ENOMEM;

	/* store the original indirection */
	ops->get_rss(h, org_indir, NULL, NULL);

	cur_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!cur_indir) {
		ret = -ENOMEM;
		goto cur_indir_alloc_err;
	}

	/* set loopback */
	if (hns_enable_serdes_lb(ndev)) {
		ret = -EINVAL;
		goto enable_serdes_lb_err;
	}

	/* foreach every rx ring to clear fetch desc */
	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
		found = false;
		fetch_num = ring_dist(ring, head, tail);

		while (head != tail) {
			if (ring->desc_cb[head].page_offset != 0) {
				found = true;
				break;
			}

			head++;
			if (head == ring->desc_num)
				head = 0;
		}

		if (found) {
			for (j = 0; j < indir_size / sizeof(*org_indir); j++)
				cur_indir[j] = i;
			ops->set_rss(h, cur_indir, NULL, 0);

			for (j = 0; j < fetch_num; j++) {
				/* alloc one skb and init */
				skb = hns_assemble_skb(ndev);
				if (!skb) {
					ret = -ENOMEM;
					goto out;
				}
				rd = &tx_ring_data(priv, skb->queue_mapping);
				hns_nic_net_xmit_hw(ndev, skb, rd);

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean rx */
					rd = &rx_ring_data(priv, i);
					if (rd->poll_one(rd, fetch_num,
							 hns_nic_drop_rx_fetch))
						break;
				}

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean tx ring 0 send package */
					rd = &tx_ring_data(priv,
							   HNS_LB_TX_RING);
					if (rd->poll_one(rd, fetch_num, NULL))
						break;
				}
			}
		}
	}

out:
	/* restore everything */
	ops->set_rss(h, org_indir, NULL, 0);
	hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
	kfree(cur_indir);
cur_indir_alloc_err:
	kfree(org_indir);

	return ret;
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	bool if_running = netif_running(ndev);
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	if (new_mtu == ndev->mtu)
		return 0;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (if_running) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);
	}

	if (priv->enet_ver != AE_VERSION_1 &&
	    ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
	    new_mtu > BD_SIZE_2048_MAX_MTU) {
		/* update desc */
		hnae_reinit_all_ring_desc(h);

		/* clear the package which the chip has fetched */
		ret = hns_nic_clear_all_rx_fetch(ndev);

		/* the page offset must be consistent with desc */
		hnae_reinit_all_ring_page_off(h);

		if (ret) {
			netdev_err(ndev, "clear the fetched desc fail\n");
			goto out;
		}
	}

	ret = h->dev->ops->set_mtu(h, new_mtu);
	if (ret) {
		netdev_err(ndev, "set mtu fail, return value %d\n",
			   ret);
		goto out;
	}

	/* finally, set new mtu to netdevice */
	WRITE_ONCE(ndev->mtu, new_mtu);

out:
	if (if_running) {
		if (hns_nic_net_open(ndev)) {
			netdev_err(ndev, "hns net open fail\n");
			ret = -EINVAL;
		}
	}

	return ret;
}
static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 do not support tso!\n");
		break;
	default:
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}
static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->add_uc_addr)
		return h->dev->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns_nic_uc_unsync(struct net_device *netdev,
			     const unsigned char *addr)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->rm_uc_addr)
		return h->dev->ops->rm_uc_addr(h, addr);

	return 0;
}
/**
 * hns_set_multicast_list - set multicast mac address
 * @ndev: net device
 *
 * return void
 */
static void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->clr_mc_addr)
		if (h->dev->ops->clr_mc_addr(h))
			netdev_err(ndev, "clear multicast address fail\n");

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}
static void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);

	if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
		netdev_err(ndev, "sync uc address fail\n");
}
static void hns_nic_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;
}
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     struct net_device *sb_dev)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return netdev_pick_tx(ndev, skb, NULL);
}
static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_eth_ioctl = phy_do_ioctl_running,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};
static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}
/* for dumping key regs*/
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}
/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);
	hns_nic_net_reinit(priv->netdev);

	rtnl_unlock();
}

/* for doing service complete*/
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
	/* make sure to commit the things */
	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}
static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_reset_subtask(priv);
	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(struct timer_list *t)
{
	struct hns_nic_priv *priv = from_timer(priv, t, service_timer);

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}
/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too much queue (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(array3_size(h->q_num,
					      sizeof(*priv->ring_data), 2),
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}
static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}
static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		priv->ops.fill_desc = fill_desc_v2;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2;
		netif_set_tso_max_size(netdev, 7 * 4096);
		/* enable tso when init
		 * control tso on/off through TSE bit in bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}
static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}
static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}
static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (!ae_node) {
			ret = -ENODEV;
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct fwnode_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id))
			priv->enet_ver = AE_VERSION_1;
		else if (acpi_dev_found(hns_enet_acpi_match[1].id))
			priv->enet_ver = AE_VERSION_2;
		else {
			ret = -ENXIO;
			goto out_read_prop_fail;
		}

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		if (!is_acpi_device_node(args.fwnode)) {
			ret = -EINVAL;
			goto out_read_prop_fail;
		}
		priv->fwnode = args.fwnode;
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		ret = -ENXIO;
		goto out_read_prop_fail;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* only for old code compatible */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
			: port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
	ndev->min_mtu = MAC_MIN_MTU;
	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->max_mtu = MAC_MAX_MTU_V2 -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	default:
		ndev->max_mtu = MAC_MAX_MTU -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	timer_setup(&priv->service_timer, hns_nic_service_timer, 0);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	/* safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));
	free_netdev(ndev);
	return ret;
}
static void hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	/* safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));

	free_netdev(ndev);
}
static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");