2 * Copyright (c) 2016~2017 Hisilicon Limited.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/if_vlan.h>
15 #include <linux/ipv6.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/skbuff.h>
19 #include <linux/sctp.h>
20 #include <linux/vermagic.h>
22 #include <net/pkt_cls.h>
23 #include <net/vxlan.h>
26 #include "hns3_enet.h"
28 static const char hns3_driver_name
[] = "hns3";
29 const char hns3_driver_version
[] = VERMAGIC_STRING
;
30 static const char hns3_driver_string
[] =
31 "Hisilicon Ethernet Network Driver for Hip08 Family";
32 static const char hns3_copyright
[] = "Copyright (c) 2017 Huawei Corporation.";
33 static struct hnae3_client client
;
35 /* hns3_pci_tbl - PCI Device ID Table
37 * Last entry must be all 0s
39 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
40 * Class, Class Mask, private data (not used) }
42 static const struct pci_device_id hns3_pci_tbl
[] = {
43 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_GE
), 0},
44 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_25GE
), 0},
45 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_25GE_RDMA
),
46 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS
},
47 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_25GE_RDMA_MACSEC
),
48 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS
},
49 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_50GE_RDMA
),
50 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS
},
51 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_50GE_RDMA_MACSEC
),
52 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS
},
53 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_100G_RDMA_MACSEC
),
54 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS
},
55 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_100G_VF
), 0},
56 {PCI_VDEVICE(HUAWEI
, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF
), 0},
57 /* required last entry */
60 MODULE_DEVICE_TABLE(pci
, hns3_pci_tbl
);
62 static irqreturn_t
hns3_irq_handle(int irq
, void *dev
)
64 struct hns3_enet_tqp_vector
*tqp_vector
= dev
;
66 napi_schedule(&tqp_vector
->napi
);
71 static void hns3_nic_uninit_irq(struct hns3_nic_priv
*priv
)
73 struct hns3_enet_tqp_vector
*tqp_vectors
;
76 for (i
= 0; i
< priv
->vector_num
; i
++) {
77 tqp_vectors
= &priv
->tqp_vector
[i
];
79 if (tqp_vectors
->irq_init_flag
!= HNS3_VECTOR_INITED
)
82 /* release the irq resource */
83 free_irq(tqp_vectors
->vector_irq
, tqp_vectors
);
84 tqp_vectors
->irq_init_flag
= HNS3_VECTOR_NOT_INITED
;
88 static int hns3_nic_init_irq(struct hns3_nic_priv
*priv
)
90 struct hns3_enet_tqp_vector
*tqp_vectors
;
97 for (i
= 0; i
< priv
->vector_num
; i
++) {
98 tqp_vectors
= &priv
->tqp_vector
[i
];
100 if (tqp_vectors
->irq_init_flag
== HNS3_VECTOR_INITED
)
103 if (tqp_vectors
->tx_group
.ring
&& tqp_vectors
->rx_group
.ring
) {
104 snprintf(tqp_vectors
->name
, HNAE3_INT_NAME_LEN
- 1,
105 "%s-%s-%d", priv
->netdev
->name
, "TxRx",
108 } else if (tqp_vectors
->rx_group
.ring
) {
109 snprintf(tqp_vectors
->name
, HNAE3_INT_NAME_LEN
- 1,
110 "%s-%s-%d", priv
->netdev
->name
, "Rx",
112 } else if (tqp_vectors
->tx_group
.ring
) {
113 snprintf(tqp_vectors
->name
, HNAE3_INT_NAME_LEN
- 1,
114 "%s-%s-%d", priv
->netdev
->name
, "Tx",
117 /* Skip this unused q_vector */
121 tqp_vectors
->name
[HNAE3_INT_NAME_LEN
- 1] = '\0';
123 ret
= request_irq(tqp_vectors
->vector_irq
, hns3_irq_handle
, 0,
127 netdev_err(priv
->netdev
, "request irq(%d) fail\n",
128 tqp_vectors
->vector_irq
);
132 tqp_vectors
->irq_init_flag
= HNS3_VECTOR_INITED
;
138 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector
*tqp_vector
,
141 writel(mask_en
, tqp_vector
->mask_addr
);
144 static void hns3_vector_enable(struct hns3_enet_tqp_vector
*tqp_vector
)
146 napi_enable(&tqp_vector
->napi
);
149 hns3_mask_vector_irq(tqp_vector
, 1);
152 static void hns3_vector_disable(struct hns3_enet_tqp_vector
*tqp_vector
)
155 hns3_mask_vector_irq(tqp_vector
, 0);
157 disable_irq(tqp_vector
->vector_irq
);
158 napi_disable(&tqp_vector
->napi
);
161 void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector
*tqp_vector
,
164 u32 rl_reg
= hns3_rl_usec_to_reg(rl_value
);
166 /* this defines the configuration for RL (Interrupt Rate Limiter).
167 * Rl defines rate of interrupts i.e. number of interrupts-per-second
168 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
171 if (rl_reg
> 0 && !tqp_vector
->tx_group
.gl_adapt_enable
&&
172 !tqp_vector
->rx_group
.gl_adapt_enable
)
173 /* According to the hardware, the range of rl_reg is
174 * 0-59 and the unit is 4.
176 rl_reg
|= HNS3_INT_RL_ENABLE_MASK
;
178 writel(rl_reg
, tqp_vector
->mask_addr
+ HNS3_VECTOR_RL_OFFSET
);
181 void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector
*tqp_vector
,
184 u32 rx_gl_reg
= hns3_gl_usec_to_reg(gl_value
);
186 writel(rx_gl_reg
, tqp_vector
->mask_addr
+ HNS3_VECTOR_GL0_OFFSET
);
189 void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector
*tqp_vector
,
192 u32 tx_gl_reg
= hns3_gl_usec_to_reg(gl_value
);
194 writel(tx_gl_reg
, tqp_vector
->mask_addr
+ HNS3_VECTOR_GL1_OFFSET
);
197 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector
*tqp_vector
,
198 struct hns3_nic_priv
*priv
)
200 struct hnae3_handle
*h
= priv
->ae_handle
;
202 /* initialize the configuration for interrupt coalescing.
203 * 1. GL (Interrupt Gap Limiter)
204 * 2. RL (Interrupt Rate Limiter)
207 /* Default: enable interrupt coalescing self-adaptive and GL */
208 tqp_vector
->tx_group
.gl_adapt_enable
= 1;
209 tqp_vector
->rx_group
.gl_adapt_enable
= 1;
211 tqp_vector
->tx_group
.int_gl
= HNS3_INT_GL_50K
;
212 tqp_vector
->rx_group
.int_gl
= HNS3_INT_GL_50K
;
214 hns3_set_vector_coalesce_tx_gl(tqp_vector
,
215 tqp_vector
->tx_group
.int_gl
);
216 hns3_set_vector_coalesce_rx_gl(tqp_vector
,
217 tqp_vector
->rx_group
.int_gl
);
219 /* Default: disable RL */
220 h
->kinfo
.int_rl_setting
= 0;
221 hns3_set_vector_coalesce_rl(tqp_vector
, h
->kinfo
.int_rl_setting
);
223 tqp_vector
->rx_group
.flow_level
= HNS3_FLOW_LOW
;
224 tqp_vector
->tx_group
.flow_level
= HNS3_FLOW_LOW
;
227 static int hns3_nic_set_real_num_queue(struct net_device
*netdev
)
229 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
230 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
231 unsigned int queue_size
= kinfo
->rss_size
* kinfo
->num_tc
;
234 ret
= netif_set_real_num_tx_queues(netdev
, queue_size
);
237 "netif_set_real_num_tx_queues fail, ret=%d!\n",
242 ret
= netif_set_real_num_rx_queues(netdev
, queue_size
);
245 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret
);
252 static int hns3_nic_net_up(struct net_device
*netdev
)
254 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
255 struct hnae3_handle
*h
= priv
->ae_handle
;
259 /* get irq resource for all vectors */
260 ret
= hns3_nic_init_irq(priv
);
262 netdev_err(netdev
, "hns init irq failed! ret=%d\n", ret
);
266 /* enable the vectors */
267 for (i
= 0; i
< priv
->vector_num
; i
++)
268 hns3_vector_enable(&priv
->tqp_vector
[i
]);
270 /* start the ae_dev */
271 ret
= h
->ae_algo
->ops
->start
? h
->ae_algo
->ops
->start(h
) : 0;
275 clear_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
);
280 for (j
= i
- 1; j
>= 0; j
--)
281 hns3_vector_disable(&priv
->tqp_vector
[j
]);
283 hns3_nic_uninit_irq(priv
);
288 static int hns3_nic_net_open(struct net_device
*netdev
)
290 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
293 netif_carrier_off(netdev
);
295 ret
= hns3_nic_set_real_num_queue(netdev
);
299 ret
= hns3_nic_net_up(netdev
);
302 "hns net up fail, ret=%d!\n", ret
);
306 priv
->last_reset_time
= jiffies
;
310 static void hns3_nic_net_down(struct net_device
*netdev
)
312 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
313 const struct hnae3_ae_ops
*ops
;
316 if (test_and_set_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
320 ops
= priv
->ae_handle
->ae_algo
->ops
;
322 ops
->stop(priv
->ae_handle
);
324 /* disable vectors */
325 for (i
= 0; i
< priv
->vector_num
; i
++)
326 hns3_vector_disable(&priv
->tqp_vector
[i
]);
328 /* free irq resources */
329 hns3_nic_uninit_irq(priv
);
332 static int hns3_nic_net_stop(struct net_device
*netdev
)
334 netif_tx_stop_all_queues(netdev
);
335 netif_carrier_off(netdev
);
337 hns3_nic_net_down(netdev
);
342 static int hns3_nic_uc_sync(struct net_device
*netdev
,
343 const unsigned char *addr
)
345 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
347 if (h
->ae_algo
->ops
->add_uc_addr
)
348 return h
->ae_algo
->ops
->add_uc_addr(h
, addr
);
353 static int hns3_nic_uc_unsync(struct net_device
*netdev
,
354 const unsigned char *addr
)
356 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
358 if (h
->ae_algo
->ops
->rm_uc_addr
)
359 return h
->ae_algo
->ops
->rm_uc_addr(h
, addr
);
364 static int hns3_nic_mc_sync(struct net_device
*netdev
,
365 const unsigned char *addr
)
367 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
369 if (h
->ae_algo
->ops
->add_mc_addr
)
370 return h
->ae_algo
->ops
->add_mc_addr(h
, addr
);
375 static int hns3_nic_mc_unsync(struct net_device
*netdev
,
376 const unsigned char *addr
)
378 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
380 if (h
->ae_algo
->ops
->rm_mc_addr
)
381 return h
->ae_algo
->ops
->rm_mc_addr(h
, addr
);
386 static void hns3_nic_set_rx_mode(struct net_device
*netdev
)
388 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
390 if (h
->ae_algo
->ops
->set_promisc_mode
) {
391 if (netdev
->flags
& IFF_PROMISC
)
392 h
->ae_algo
->ops
->set_promisc_mode(h
, 1);
394 h
->ae_algo
->ops
->set_promisc_mode(h
, 0);
396 if (__dev_uc_sync(netdev
, hns3_nic_uc_sync
, hns3_nic_uc_unsync
))
397 netdev_err(netdev
, "sync uc address fail\n");
398 if (netdev
->flags
& IFF_MULTICAST
)
399 if (__dev_mc_sync(netdev
, hns3_nic_mc_sync
, hns3_nic_mc_unsync
))
400 netdev_err(netdev
, "sync mc address fail\n");
403 static int hns3_set_tso(struct sk_buff
*skb
, u32
*paylen
,
404 u16
*mss
, u32
*type_cs_vlan_tso
)
406 u32 l4_offset
, hdr_len
;
407 union l3_hdr_info l3
;
408 union l4_hdr_info l4
;
412 if (!skb_is_gso(skb
))
415 ret
= skb_cow_head(skb
, 0);
419 l3
.hdr
= skb_network_header(skb
);
420 l4
.hdr
= skb_transport_header(skb
);
422 /* Software should clear the IPv4's checksum field when tso is
425 if (l3
.v4
->version
== 4)
429 if (skb_shinfo(skb
)->gso_type
& (SKB_GSO_GRE
|
432 SKB_GSO_UDP_TUNNEL_CSUM
)) {
433 if ((!(skb_shinfo(skb
)->gso_type
&
435 (skb_shinfo(skb
)->gso_type
&
436 SKB_GSO_UDP_TUNNEL_CSUM
)) {
437 /* Software should clear the udp's checksum
438 * field when tso is needed.
442 /* reset l3&l4 pointers from outer to inner headers */
443 l3
.hdr
= skb_inner_network_header(skb
);
444 l4
.hdr
= skb_inner_transport_header(skb
);
446 /* Software should clear the IPv4's checksum field when
449 if (l3
.v4
->version
== 4)
453 /* normal or tunnel packet*/
454 l4_offset
= l4
.hdr
- skb
->data
;
455 hdr_len
= (l4
.tcp
->doff
* 4) + l4_offset
;
457 /* remove payload length from inner pseudo checksum when tso*/
458 l4_paylen
= skb
->len
- l4_offset
;
459 csum_replace_by_diff(&l4
.tcp
->check
,
460 (__force __wsum
)htonl(l4_paylen
));
462 /* find the txbd field values */
463 *paylen
= skb
->len
- hdr_len
;
464 hnae_set_bit(*type_cs_vlan_tso
,
467 /* get MSS for TSO */
468 *mss
= skb_shinfo(skb
)->gso_size
;
473 static int hns3_get_l4_protocol(struct sk_buff
*skb
, u8
*ol4_proto
,
481 unsigned char *l4_hdr
;
482 unsigned char *exthdr
;
486 /* find outer header point */
487 l3
.hdr
= skb_network_header(skb
);
488 l4_hdr
= skb_inner_transport_header(skb
);
490 if (skb
->protocol
== htons(ETH_P_IPV6
)) {
491 exthdr
= l3
.hdr
+ sizeof(*l3
.v6
);
492 l4_proto_tmp
= l3
.v6
->nexthdr
;
493 if (l4_hdr
!= exthdr
)
494 ipv6_skip_exthdr(skb
, exthdr
- skb
->data
,
495 &l4_proto_tmp
, &frag_off
);
496 } else if (skb
->protocol
== htons(ETH_P_IP
)) {
497 l4_proto_tmp
= l3
.v4
->protocol
;
502 *ol4_proto
= l4_proto_tmp
;
505 if (!skb
->encapsulation
) {
510 /* find inner header point */
511 l3
.hdr
= skb_inner_network_header(skb
);
512 l4_hdr
= skb_inner_transport_header(skb
);
514 if (l3
.v6
->version
== 6) {
515 exthdr
= l3
.hdr
+ sizeof(*l3
.v6
);
516 l4_proto_tmp
= l3
.v6
->nexthdr
;
517 if (l4_hdr
!= exthdr
)
518 ipv6_skip_exthdr(skb
, exthdr
- skb
->data
,
519 &l4_proto_tmp
, &frag_off
);
520 } else if (l3
.v4
->version
== 4) {
521 l4_proto_tmp
= l3
.v4
->protocol
;
524 *il4_proto
= l4_proto_tmp
;
529 static void hns3_set_l2l3l4_len(struct sk_buff
*skb
, u8 ol4_proto
,
530 u8 il4_proto
, u32
*type_cs_vlan_tso
,
531 u32
*ol_type_vlan_len_msec
)
541 struct gre_base_hdr
*gre
;
544 unsigned char *l2_hdr
;
545 u8 l4_proto
= ol4_proto
;
552 l3
.hdr
= skb_network_header(skb
);
553 l4
.hdr
= skb_transport_header(skb
);
555 /* compute L2 header size for normal packet, defined in 2 Bytes */
556 l2_len
= l3
.hdr
- skb
->data
;
557 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L2LEN_M
,
558 HNS3_TXD_L2LEN_S
, l2_len
>> 1);
561 if (skb
->encapsulation
) {
562 /* compute OL2 header size, defined in 2 Bytes */
564 hnae_set_field(*ol_type_vlan_len_msec
,
566 HNS3_TXD_L2LEN_S
, ol2_len
>> 1);
568 /* compute OL3 header size, defined in 4 Bytes */
569 ol3_len
= l4
.hdr
- l3
.hdr
;
570 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L3LEN_M
,
571 HNS3_TXD_L3LEN_S
, ol3_len
>> 2);
573 /* MAC in UDP, MAC in GRE (0x6558)*/
574 if ((ol4_proto
== IPPROTO_UDP
) || (ol4_proto
== IPPROTO_GRE
)) {
575 /* switch MAC header ptr from outer to inner header.*/
576 l2_hdr
= skb_inner_mac_header(skb
);
578 /* compute OL4 header size, defined in 4 Bytes. */
579 ol4_len
= l2_hdr
- l4
.hdr
;
580 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_L4LEN_M
,
581 HNS3_TXD_L4LEN_S
, ol4_len
>> 2);
583 /* switch IP header ptr from outer to inner header */
584 l3
.hdr
= skb_inner_network_header(skb
);
586 /* compute inner l2 header size, defined in 2 Bytes. */
587 l2_len
= l3
.hdr
- l2_hdr
;
588 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L2LEN_M
,
589 HNS3_TXD_L2LEN_S
, l2_len
>> 1);
591 /* skb packet types not supported by hardware,
592 * txbd len fild doesn't be filled.
597 /* switch L4 header pointer from outer to inner */
598 l4
.hdr
= skb_inner_transport_header(skb
);
600 l4_proto
= il4_proto
;
603 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
604 l3_len
= l4
.hdr
- l3
.hdr
;
605 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3LEN_M
,
606 HNS3_TXD_L3LEN_S
, l3_len
>> 2);
608 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
611 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
612 HNS3_TXD_L4LEN_S
, l4
.tcp
->doff
);
615 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
616 HNS3_TXD_L4LEN_S
, (sizeof(struct sctphdr
) >> 2));
619 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L4LEN_M
,
620 HNS3_TXD_L4LEN_S
, (sizeof(struct udphdr
) >> 2));
623 /* skb packet types not supported by hardware,
624 * txbd len fild doesn't be filled.
630 static int hns3_set_l3l4_type_csum(struct sk_buff
*skb
, u8 ol4_proto
,
631 u8 il4_proto
, u32
*type_cs_vlan_tso
,
632 u32
*ol_type_vlan_len_msec
)
639 u32 l4_proto
= ol4_proto
;
641 l3
.hdr
= skb_network_header(skb
);
643 /* define OL3 type and tunnel type(OL4).*/
644 if (skb
->encapsulation
) {
645 /* define outer network header type.*/
646 if (skb
->protocol
== htons(ETH_P_IP
)) {
648 hnae_set_field(*ol_type_vlan_len_msec
,
649 HNS3_TXD_OL3T_M
, HNS3_TXD_OL3T_S
,
650 HNS3_OL3T_IPV4_CSUM
);
652 hnae_set_field(*ol_type_vlan_len_msec
,
653 HNS3_TXD_OL3T_M
, HNS3_TXD_OL3T_S
,
654 HNS3_OL3T_IPV4_NO_CSUM
);
656 } else if (skb
->protocol
== htons(ETH_P_IPV6
)) {
657 hnae_set_field(*ol_type_vlan_len_msec
, HNS3_TXD_OL3T_M
,
658 HNS3_TXD_OL3T_S
, HNS3_OL3T_IPV6
);
661 /* define tunnel type(OL4).*/
664 hnae_set_field(*ol_type_vlan_len_msec
,
667 HNS3_TUN_MAC_IN_UDP
);
670 hnae_set_field(*ol_type_vlan_len_msec
,
676 /* drop the skb tunnel packet if hardware don't support,
677 * because hardware can't calculate csum when TSO.
682 /* the stack computes the IP header already,
683 * driver calculate l4 checksum when not TSO.
685 skb_checksum_help(skb
);
689 l3
.hdr
= skb_inner_network_header(skb
);
690 l4_proto
= il4_proto
;
693 if (l3
.v4
->version
== 4) {
694 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3T_M
,
695 HNS3_TXD_L3T_S
, HNS3_L3T_IPV4
);
697 /* the stack computes the IP header already, the only time we
698 * need the hardware to recompute it is in the case of TSO.
701 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L3CS_B
, 1);
703 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
704 } else if (l3
.v6
->version
== 6) {
705 hnae_set_field(*type_cs_vlan_tso
, HNS3_TXD_L3T_M
,
706 HNS3_TXD_L3T_S
, HNS3_L3T_IPV6
);
707 hnae_set_bit(*type_cs_vlan_tso
, HNS3_TXD_L4CS_B
, 1);
712 hnae_set_field(*type_cs_vlan_tso
,
718 hnae_set_field(*type_cs_vlan_tso
,
724 hnae_set_field(*type_cs_vlan_tso
,
730 /* drop the skb tunnel packet if hardware don't support,
731 * because hardware can't calculate csum when TSO.
736 /* the stack computes the IP header already,
737 * driver calculate l4 checksum when not TSO.
739 skb_checksum_help(skb
);
746 static void hns3_set_txbd_baseinfo(u16
*bdtp_fe_sc_vld_ra_ri
, int frag_end
)
748 /* Config bd buffer end */
749 hnae_set_field(*bdtp_fe_sc_vld_ra_ri
, HNS3_TXD_BDTYPE_M
,
750 HNS3_TXD_BDTYPE_M
, 0);
751 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri
, HNS3_TXD_FE_B
, !!frag_end
);
752 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri
, HNS3_TXD_VLD_B
, 1);
753 hnae_set_field(*bdtp_fe_sc_vld_ra_ri
, HNS3_TXD_SC_M
, HNS3_TXD_SC_S
, 0);
756 static int hns3_fill_desc_vtags(struct sk_buff
*skb
,
757 struct hns3_enet_ring
*tx_ring
,
758 u32
*inner_vlan_flag
,
763 #define HNS3_TX_VLAN_PRIO_SHIFT 13
765 if (skb
->protocol
== htons(ETH_P_8021Q
) &&
766 !(tx_ring
->tqp
->handle
->kinfo
.netdev
->features
&
767 NETIF_F_HW_VLAN_CTAG_TX
)) {
768 /* When HW VLAN acceleration is turned off, and the stack
769 * sets the protocol to 802.1q, the driver just need to
770 * set the protocol to the encapsulated ethertype.
772 skb
->protocol
= vlan_get_protocol(skb
);
776 if (skb_vlan_tag_present(skb
)) {
779 vlan_tag
= skb_vlan_tag_get(skb
);
780 vlan_tag
|= (skb
->priority
& 0x7) << HNS3_TX_VLAN_PRIO_SHIFT
;
782 /* Based on hw strategy, use out_vtag in two layer tag case,
783 * and use inner_vtag in one tag case.
785 if (skb
->protocol
== htons(ETH_P_8021Q
)) {
786 hnae_set_bit(*out_vlan_flag
, HNS3_TXD_OVLAN_B
, 1);
787 *out_vtag
= vlan_tag
;
789 hnae_set_bit(*inner_vlan_flag
, HNS3_TXD_VLAN_B
, 1);
790 *inner_vtag
= vlan_tag
;
792 } else if (skb
->protocol
== htons(ETH_P_8021Q
)) {
793 struct vlan_ethhdr
*vhdr
;
796 rc
= skb_cow_head(skb
, 0);
799 vhdr
= (struct vlan_ethhdr
*)skb
->data
;
800 vhdr
->h_vlan_TCI
|= cpu_to_be16((skb
->priority
& 0x7)
801 << HNS3_TX_VLAN_PRIO_SHIFT
);
804 skb
->protocol
= vlan_get_protocol(skb
);
808 static int hns3_fill_desc(struct hns3_enet_ring
*ring
, void *priv
,
809 int size
, dma_addr_t dma
, int frag_end
,
810 enum hns_desc_type type
)
812 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
813 struct hns3_desc
*desc
= &ring
->desc
[ring
->next_to_use
];
814 u32 ol_type_vlan_len_msec
= 0;
815 u16 bdtp_fe_sc_vld_ra_ri
= 0;
816 u32 type_cs_vlan_tso
= 0;
827 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
828 desc_cb
->priv
= priv
;
829 desc_cb
->length
= size
;
831 desc_cb
->type
= type
;
833 /* now, fill the descriptor */
834 desc
->addr
= cpu_to_le64(dma
);
835 desc
->tx
.send_size
= cpu_to_le16((u16
)size
);
836 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri
, frag_end
);
837 desc
->tx
.bdtp_fe_sc_vld_ra_ri
= cpu_to_le16(bdtp_fe_sc_vld_ra_ri
);
839 if (type
== DESC_TYPE_SKB
) {
840 skb
= (struct sk_buff
*)priv
;
843 ret
= hns3_fill_desc_vtags(skb
, ring
, &type_cs_vlan_tso
,
844 &ol_type_vlan_len_msec
,
845 &inner_vtag
, &out_vtag
);
849 if (skb
->ip_summed
== CHECKSUM_PARTIAL
) {
850 skb_reset_mac_len(skb
);
851 protocol
= skb
->protocol
;
853 ret
= hns3_get_l4_protocol(skb
, &ol4_proto
, &il4_proto
);
856 hns3_set_l2l3l4_len(skb
, ol4_proto
, il4_proto
,
858 &ol_type_vlan_len_msec
);
859 ret
= hns3_set_l3l4_type_csum(skb
, ol4_proto
, il4_proto
,
861 &ol_type_vlan_len_msec
);
865 ret
= hns3_set_tso(skb
, &paylen
, &mss
,
872 desc
->tx
.ol_type_vlan_len_msec
=
873 cpu_to_le32(ol_type_vlan_len_msec
);
874 desc
->tx
.type_cs_vlan_tso_len
=
875 cpu_to_le32(type_cs_vlan_tso
);
876 desc
->tx
.paylen
= cpu_to_le32(paylen
);
877 desc
->tx
.mss
= cpu_to_le16(mss
);
878 desc
->tx
.vlan_tag
= cpu_to_le16(inner_vtag
);
879 desc
->tx
.outer_vlan_tag
= cpu_to_le16(out_vtag
);
882 /* move ring pointer to next.*/
883 ring_ptr_move_fw(ring
, next_to_use
);
888 static int hns3_fill_desc_tso(struct hns3_enet_ring
*ring
, void *priv
,
889 int size
, dma_addr_t dma
, int frag_end
,
890 enum hns_desc_type type
)
892 unsigned int frag_buf_num
;
897 frag_buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
898 sizeoflast
= size
% HNS3_MAX_BD_SIZE
;
899 sizeoflast
= sizeoflast
? sizeoflast
: HNS3_MAX_BD_SIZE
;
901 /* When the frag size is bigger than hardware, split this frag */
902 for (k
= 0; k
< frag_buf_num
; k
++) {
903 ret
= hns3_fill_desc(ring
, priv
,
904 (k
== frag_buf_num
- 1) ?
905 sizeoflast
: HNS3_MAX_BD_SIZE
,
906 dma
+ HNS3_MAX_BD_SIZE
* k
,
907 frag_end
&& (k
== frag_buf_num
- 1) ? 1 : 0,
908 (type
== DESC_TYPE_SKB
&& !k
) ?
909 DESC_TYPE_SKB
: DESC_TYPE_PAGE
);
917 static int hns3_nic_maybe_stop_tso(struct sk_buff
**out_skb
, int *bnum
,
918 struct hns3_enet_ring
*ring
)
920 struct sk_buff
*skb
= *out_skb
;
921 struct skb_frag_struct
*frag
;
928 size
= skb_headlen(skb
);
929 buf_num
= (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
931 frag_num
= skb_shinfo(skb
)->nr_frags
;
932 for (i
= 0; i
< frag_num
; i
++) {
933 frag
= &skb_shinfo(skb
)->frags
[i
];
934 size
= skb_frag_size(frag
);
936 (size
+ HNS3_MAX_BD_SIZE
- 1) / HNS3_MAX_BD_SIZE
;
937 if (bdnum_for_frag
> HNS3_MAX_BD_PER_FRAG
)
940 buf_num
+= bdnum_for_frag
;
943 if (buf_num
> ring_space(ring
))
950 static int hns3_nic_maybe_stop_tx(struct sk_buff
**out_skb
, int *bnum
,
951 struct hns3_enet_ring
*ring
)
953 struct sk_buff
*skb
= *out_skb
;
956 /* No. of segments (plus a header) */
957 buf_num
= skb_shinfo(skb
)->nr_frags
+ 1;
959 if (buf_num
> ring_space(ring
))
967 static void hns_nic_dma_unmap(struct hns3_enet_ring
*ring
, int next_to_use_orig
)
969 struct device
*dev
= ring_to_dev(ring
);
972 for (i
= 0; i
< ring
->desc_num
; i
++) {
973 /* check if this is where we started */
974 if (ring
->next_to_use
== next_to_use_orig
)
977 /* unmap the descriptor dma address */
978 if (ring
->desc_cb
[ring
->next_to_use
].type
== DESC_TYPE_SKB
)
979 dma_unmap_single(dev
,
980 ring
->desc_cb
[ring
->next_to_use
].dma
,
981 ring
->desc_cb
[ring
->next_to_use
].length
,
985 ring
->desc_cb
[ring
->next_to_use
].dma
,
986 ring
->desc_cb
[ring
->next_to_use
].length
,
990 ring_ptr_move_bw(ring
, next_to_use
);
994 netdev_tx_t
hns3_nic_net_xmit(struct sk_buff
*skb
, struct net_device
*netdev
)
996 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
997 struct hns3_nic_ring_data
*ring_data
=
998 &tx_ring_data(priv
, skb
->queue_mapping
);
999 struct hns3_enet_ring
*ring
= ring_data
->ring
;
1000 struct device
*dev
= priv
->dev
;
1001 struct netdev_queue
*dev_queue
;
1002 struct skb_frag_struct
*frag
;
1003 int next_to_use_head
;
1004 int next_to_use_frag
;
1012 /* Prefetch the data used later */
1013 prefetch(skb
->data
);
1015 switch (priv
->ops
.maybe_stop_tx(&skb
, &buf_num
, ring
)) {
1017 u64_stats_update_begin(&ring
->syncp
);
1018 ring
->stats
.tx_busy
++;
1019 u64_stats_update_end(&ring
->syncp
);
1021 goto out_net_tx_busy
;
1023 u64_stats_update_begin(&ring
->syncp
);
1024 ring
->stats
.sw_err_cnt
++;
1025 u64_stats_update_end(&ring
->syncp
);
1026 netdev_err(netdev
, "no memory to xmit!\n");
1033 /* No. of segments (plus a header) */
1034 seg_num
= skb_shinfo(skb
)->nr_frags
+ 1;
1035 /* Fill the first part */
1036 size
= skb_headlen(skb
);
1038 next_to_use_head
= ring
->next_to_use
;
1040 dma
= dma_map_single(dev
, skb
->data
, size
, DMA_TO_DEVICE
);
1041 if (dma_mapping_error(dev
, dma
)) {
1042 netdev_err(netdev
, "TX head DMA map failed\n");
1043 ring
->stats
.sw_err_cnt
++;
1047 ret
= priv
->ops
.fill_desc(ring
, skb
, size
, dma
, seg_num
== 1 ? 1 : 0,
1050 goto head_dma_map_err
;
1052 next_to_use_frag
= ring
->next_to_use
;
1053 /* Fill the fragments */
1054 for (i
= 1; i
< seg_num
; i
++) {
1055 frag
= &skb_shinfo(skb
)->frags
[i
- 1];
1056 size
= skb_frag_size(frag
);
1057 dma
= skb_frag_dma_map(dev
, frag
, 0, size
, DMA_TO_DEVICE
);
1058 if (dma_mapping_error(dev
, dma
)) {
1059 netdev_err(netdev
, "TX frag(%d) DMA map failed\n", i
);
1060 ring
->stats
.sw_err_cnt
++;
1061 goto frag_dma_map_err
;
1063 ret
= priv
->ops
.fill_desc(ring
, skb_frag_page(frag
), size
, dma
,
1064 seg_num
- 1 == i
? 1 : 0,
1068 goto frag_dma_map_err
;
1071 /* Complete translate all packets */
1072 dev_queue
= netdev_get_tx_queue(netdev
, ring_data
->queue_index
);
1073 netdev_tx_sent_queue(dev_queue
, skb
->len
);
1075 wmb(); /* Commit all data before submit */
1077 hnae_queue_xmit(ring
->tqp
, buf_num
);
1079 return NETDEV_TX_OK
;
1082 hns_nic_dma_unmap(ring
, next_to_use_frag
);
1085 hns_nic_dma_unmap(ring
, next_to_use_head
);
1088 dev_kfree_skb_any(skb
);
1089 return NETDEV_TX_OK
;
1092 netif_stop_subqueue(netdev
, ring_data
->queue_index
);
1093 smp_mb(); /* Commit all data before submit */
1095 return NETDEV_TX_BUSY
;
1098 static int hns3_nic_net_set_mac_address(struct net_device
*netdev
, void *p
)
1100 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1101 struct sockaddr
*mac_addr
= p
;
1104 if (!mac_addr
|| !is_valid_ether_addr((const u8
*)mac_addr
->sa_data
))
1105 return -EADDRNOTAVAIL
;
1107 ret
= h
->ae_algo
->ops
->set_mac_addr(h
, mac_addr
->sa_data
);
1109 netdev_err(netdev
, "set_mac_address fail, ret=%d!\n", ret
);
1113 ether_addr_copy(netdev
->dev_addr
, mac_addr
->sa_data
);
1118 static int hns3_nic_set_features(struct net_device
*netdev
,
1119 netdev_features_t features
)
1121 netdev_features_t changed
= netdev
->features
^ features
;
1122 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1123 struct hnae3_handle
*h
= priv
->ae_handle
;
1126 if (changed
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1127 if (features
& (NETIF_F_TSO
| NETIF_F_TSO6
)) {
1128 priv
->ops
.fill_desc
= hns3_fill_desc_tso
;
1129 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tso
;
1131 priv
->ops
.fill_desc
= hns3_fill_desc
;
1132 priv
->ops
.maybe_stop_tx
= hns3_nic_maybe_stop_tx
;
1136 if ((changed
& NETIF_F_HW_VLAN_CTAG_FILTER
) &&
1137 h
->ae_algo
->ops
->enable_vlan_filter
) {
1138 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1139 h
->ae_algo
->ops
->enable_vlan_filter(h
, true);
1141 h
->ae_algo
->ops
->enable_vlan_filter(h
, false);
1144 if ((changed
& NETIF_F_HW_VLAN_CTAG_RX
) &&
1145 h
->ae_algo
->ops
->enable_hw_strip_rxvtag
) {
1146 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1147 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, true);
1149 ret
= h
->ae_algo
->ops
->enable_hw_strip_rxvtag(h
, false);
1155 netdev
->features
= features
;
1159 static void hns3_nic_get_stats64(struct net_device
*netdev
,
1160 struct rtnl_link_stats64
*stats
)
1162 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1163 int queue_num
= priv
->ae_handle
->kinfo
.num_tqps
;
1164 struct hnae3_handle
*handle
= priv
->ae_handle
;
1165 struct hns3_enet_ring
*ring
;
1175 if (test_bit(HNS3_NIC_STATE_DOWN
, &priv
->state
))
1178 handle
->ae_algo
->ops
->update_stats(handle
, &netdev
->stats
);
1180 for (idx
= 0; idx
< queue_num
; idx
++) {
1181 /* fetch the tx stats */
1182 ring
= priv
->ring_data
[idx
].ring
;
1184 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1185 tx_bytes
+= ring
->stats
.tx_bytes
;
1186 tx_pkts
+= ring
->stats
.tx_pkts
;
1187 tx_drop
+= ring
->stats
.tx_busy
;
1188 tx_drop
+= ring
->stats
.sw_err_cnt
;
1189 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1191 /* fetch the rx stats */
1192 ring
= priv
->ring_data
[idx
+ queue_num
].ring
;
1194 start
= u64_stats_fetch_begin_irq(&ring
->syncp
);
1195 rx_bytes
+= ring
->stats
.rx_bytes
;
1196 rx_pkts
+= ring
->stats
.rx_pkts
;
1197 rx_drop
+= ring
->stats
.non_vld_descs
;
1198 rx_drop
+= ring
->stats
.err_pkt_len
;
1199 rx_drop
+= ring
->stats
.l2_err
;
1200 } while (u64_stats_fetch_retry_irq(&ring
->syncp
, start
));
1203 stats
->tx_bytes
= tx_bytes
;
1204 stats
->tx_packets
= tx_pkts
;
1205 stats
->rx_bytes
= rx_bytes
;
1206 stats
->rx_packets
= rx_pkts
;
1208 stats
->rx_errors
= netdev
->stats
.rx_errors
;
1209 stats
->multicast
= netdev
->stats
.multicast
;
1210 stats
->rx_length_errors
= netdev
->stats
.rx_length_errors
;
1211 stats
->rx_crc_errors
= netdev
->stats
.rx_crc_errors
;
1212 stats
->rx_missed_errors
= netdev
->stats
.rx_missed_errors
;
1214 stats
->tx_errors
= netdev
->stats
.tx_errors
;
1215 stats
->rx_dropped
= rx_drop
+ netdev
->stats
.rx_dropped
;
1216 stats
->tx_dropped
= tx_drop
+ netdev
->stats
.tx_dropped
;
1217 stats
->collisions
= netdev
->stats
.collisions
;
1218 stats
->rx_over_errors
= netdev
->stats
.rx_over_errors
;
1219 stats
->rx_frame_errors
= netdev
->stats
.rx_frame_errors
;
1220 stats
->rx_fifo_errors
= netdev
->stats
.rx_fifo_errors
;
1221 stats
->tx_aborted_errors
= netdev
->stats
.tx_aborted_errors
;
1222 stats
->tx_carrier_errors
= netdev
->stats
.tx_carrier_errors
;
1223 stats
->tx_fifo_errors
= netdev
->stats
.tx_fifo_errors
;
1224 stats
->tx_heartbeat_errors
= netdev
->stats
.tx_heartbeat_errors
;
1225 stats
->tx_window_errors
= netdev
->stats
.tx_window_errors
;
1226 stats
->rx_compressed
= netdev
->stats
.rx_compressed
;
1227 stats
->tx_compressed
= netdev
->stats
.tx_compressed
;
1230 static void hns3_add_tunnel_port(struct net_device
*netdev
, u16 port
,
1231 enum hns3_udp_tnl_type type
)
1233 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1234 struct hns3_udp_tunnel
*udp_tnl
= &priv
->udp_tnl
[type
];
1235 struct hnae3_handle
*h
= priv
->ae_handle
;
1237 if (udp_tnl
->used
&& udp_tnl
->dst_port
== port
) {
1242 if (udp_tnl
->used
) {
1244 "UDP tunnel [%d], port [%d] offload\n", type
, port
);
1248 udp_tnl
->dst_port
= port
;
1250 /* TBD send command to hardware to add port */
1251 if (h
->ae_algo
->ops
->add_tunnel_udp
)
1252 h
->ae_algo
->ops
->add_tunnel_udp(h
, port
);
1255 static void hns3_del_tunnel_port(struct net_device
*netdev
, u16 port
,
1256 enum hns3_udp_tnl_type type
)
1258 struct hns3_nic_priv
*priv
= netdev_priv(netdev
);
1259 struct hns3_udp_tunnel
*udp_tnl
= &priv
->udp_tnl
[type
];
1260 struct hnae3_handle
*h
= priv
->ae_handle
;
1262 if (!udp_tnl
->used
|| udp_tnl
->dst_port
!= port
) {
1264 "Invalid UDP tunnel port %d\n", port
);
1272 udp_tnl
->dst_port
= 0;
1273 /* TBD send command to hardware to del port */
1274 if (h
->ae_algo
->ops
->del_tunnel_udp
)
1275 h
->ae_algo
->ops
->del_tunnel_udp(h
, port
);
1278 /* hns3_nic_udp_tunnel_add - Get notifiacetion about UDP tunnel ports
1279 * @netdev: This physical ports's netdev
1280 * @ti: Tunnel information
1282 static void hns3_nic_udp_tunnel_add(struct net_device
*netdev
,
1283 struct udp_tunnel_info
*ti
)
1285 u16 port_n
= ntohs(ti
->port
);
1288 case UDP_TUNNEL_TYPE_VXLAN
:
1289 hns3_add_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_VXLAN
);
1291 case UDP_TUNNEL_TYPE_GENEVE
:
1292 hns3_add_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_GENEVE
);
1295 netdev_err(netdev
, "unsupported tunnel type %d\n", ti
->type
);
1300 static void hns3_nic_udp_tunnel_del(struct net_device
*netdev
,
1301 struct udp_tunnel_info
*ti
)
1303 u16 port_n
= ntohs(ti
->port
);
1306 case UDP_TUNNEL_TYPE_VXLAN
:
1307 hns3_del_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_VXLAN
);
1309 case UDP_TUNNEL_TYPE_GENEVE
:
1310 hns3_del_tunnel_port(netdev
, port_n
, HNS3_UDP_TNL_GENEVE
);
1317 static int hns3_setup_tc(struct net_device
*netdev
, void *type_data
)
1319 struct tc_mqprio_qopt_offload
*mqprio_qopt
= type_data
;
1320 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1321 struct hnae3_knic_private_info
*kinfo
= &h
->kinfo
;
1322 u8
*prio_tc
= mqprio_qopt
->qopt
.prio_tc_map
;
1323 u8 tc
= mqprio_qopt
->qopt
.num_tc
;
1324 u16 mode
= mqprio_qopt
->mode
;
1325 u8 hw
= mqprio_qopt
->qopt
.hw
;
1330 if (!((hw
== TC_MQPRIO_HW_OFFLOAD_TCS
&&
1331 mode
== TC_MQPRIO_MODE_CHANNEL
) || (!hw
&& tc
== 0)))
1334 if (tc
> HNAE3_MAX_TC
)
1340 if_running
= netif_running(netdev
);
1342 hns3_nic_net_stop(netdev
);
1346 ret
= (kinfo
->dcb_ops
&& kinfo
->dcb_ops
->setup_tc
) ?
1347 kinfo
->dcb_ops
->setup_tc(h
, tc
, prio_tc
) : -EOPNOTSUPP
;
1352 netdev_reset_tc(netdev
);
1354 ret
= netdev_set_num_tc(netdev
, tc
);
1358 for (i
= 0; i
< HNAE3_MAX_TC
; i
++) {
1359 if (!kinfo
->tc_info
[i
].enable
)
1362 netdev_set_tc_queue(netdev
,
1363 kinfo
->tc_info
[i
].tc
,
1364 kinfo
->tc_info
[i
].tqp_count
,
1365 kinfo
->tc_info
[i
].tqp_offset
);
1369 ret
= hns3_nic_set_real_num_queue(netdev
);
1373 hns3_nic_net_open(netdev
);
1378 static int hns3_nic_setup_tc(struct net_device
*dev
, enum tc_setup_type type
,
1381 if (type
!= TC_SETUP_QDISC_MQPRIO
)
1384 return hns3_setup_tc(dev
, type_data
);
1387 static int hns3_vlan_rx_add_vid(struct net_device
*netdev
,
1388 __be16 proto
, u16 vid
)
1390 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1393 if (h
->ae_algo
->ops
->set_vlan_filter
)
1394 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, false);
1399 static int hns3_vlan_rx_kill_vid(struct net_device
*netdev
,
1400 __be16 proto
, u16 vid
)
1402 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1405 if (h
->ae_algo
->ops
->set_vlan_filter
)
1406 ret
= h
->ae_algo
->ops
->set_vlan_filter(h
, proto
, vid
, true);
1411 static int hns3_ndo_set_vf_vlan(struct net_device
*netdev
, int vf
, u16 vlan
,
1412 u8 qos
, __be16 vlan_proto
)
1414 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1417 if (h
->ae_algo
->ops
->set_vf_vlan_filter
)
1418 ret
= h
->ae_algo
->ops
->set_vf_vlan_filter(h
, vf
, vlan
,
1424 static int hns3_nic_change_mtu(struct net_device
*netdev
, int new_mtu
)
1426 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1427 bool if_running
= netif_running(netdev
);
1430 if (!h
->ae_algo
->ops
->set_mtu
)
1433 /* if this was called with netdev up then bring netdevice down */
1435 (void)hns3_nic_net_stop(netdev
);
1439 ret
= h
->ae_algo
->ops
->set_mtu(h
, new_mtu
);
1441 netdev_err(netdev
, "failed to change MTU in hardware %d\n",
1446 netdev
->mtu
= new_mtu
;
1448 /* if the netdev was running earlier, bring it up again */
1449 if (if_running
&& hns3_nic_net_open(netdev
))
1455 static bool hns3_get_tx_timeo_queue_info(struct net_device
*ndev
)
1457 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1458 struct hns3_enet_ring
*tx_ring
= NULL
;
1459 int timeout_queue
= 0;
1460 int hw_head
, hw_tail
;
1463 /* Find the stopped queue the same way the stack does */
1464 for (i
= 0; i
< ndev
->real_num_tx_queues
; i
++) {
1465 struct netdev_queue
*q
;
1466 unsigned long trans_start
;
1468 q
= netdev_get_tx_queue(ndev
, i
);
1469 trans_start
= q
->trans_start
;
1470 if (netif_xmit_stopped(q
) &&
1472 (trans_start
+ ndev
->watchdog_timeo
))) {
1478 if (i
== ndev
->num_tx_queues
) {
1480 "no netdev TX timeout queue found, timeout count: %llu\n",
1481 priv
->tx_timeout_count
);
1485 tx_ring
= priv
->ring_data
[timeout_queue
].ring
;
1487 hw_head
= readl_relaxed(tx_ring
->tqp
->io_base
+
1488 HNS3_RING_TX_RING_HEAD_REG
);
1489 hw_tail
= readl_relaxed(tx_ring
->tqp
->io_base
+
1490 HNS3_RING_TX_RING_TAIL_REG
);
1492 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1493 priv
->tx_timeout_count
,
1495 tx_ring
->next_to_use
,
1496 tx_ring
->next_to_clean
,
1499 readl(tx_ring
->tqp_vector
->mask_addr
));
1504 static void hns3_nic_net_timeout(struct net_device
*ndev
)
1506 struct hns3_nic_priv
*priv
= netdev_priv(ndev
);
1507 unsigned long last_reset_time
= priv
->last_reset_time
;
1508 struct hnae3_handle
*h
= priv
->ae_handle
;
1510 if (!hns3_get_tx_timeo_queue_info(ndev
))
1513 priv
->tx_timeout_count
++;
1515 /* This timeout is far away enough from last timeout,
1516 * if timeout again,set the reset type to PF reset
1518 if (time_after(jiffies
, (last_reset_time
+ 20 * HZ
)))
1519 priv
->reset_level
= HNAE3_FUNC_RESET
;
1521 /* Don't do any new action before the next timeout */
1522 else if (time_before(jiffies
, (last_reset_time
+ ndev
->watchdog_timeo
)))
1525 priv
->last_reset_time
= jiffies
;
1527 if (h
->ae_algo
->ops
->reset_event
)
1528 h
->ae_algo
->ops
->reset_event(h
, priv
->reset_level
);
1530 priv
->reset_level
++;
1531 if (priv
->reset_level
> HNAE3_GLOBAL_RESET
)
1532 priv
->reset_level
= HNAE3_GLOBAL_RESET
;
1535 static const struct net_device_ops hns3_nic_netdev_ops
= {
1536 .ndo_open
= hns3_nic_net_open
,
1537 .ndo_stop
= hns3_nic_net_stop
,
1538 .ndo_start_xmit
= hns3_nic_net_xmit
,
1539 .ndo_tx_timeout
= hns3_nic_net_timeout
,
1540 .ndo_set_mac_address
= hns3_nic_net_set_mac_address
,
1541 .ndo_change_mtu
= hns3_nic_change_mtu
,
1542 .ndo_set_features
= hns3_nic_set_features
,
1543 .ndo_get_stats64
= hns3_nic_get_stats64
,
1544 .ndo_setup_tc
= hns3_nic_setup_tc
,
1545 .ndo_set_rx_mode
= hns3_nic_set_rx_mode
,
1546 .ndo_udp_tunnel_add
= hns3_nic_udp_tunnel_add
,
1547 .ndo_udp_tunnel_del
= hns3_nic_udp_tunnel_del
,
1548 .ndo_vlan_rx_add_vid
= hns3_vlan_rx_add_vid
,
1549 .ndo_vlan_rx_kill_vid
= hns3_vlan_rx_kill_vid
,
1550 .ndo_set_vf_vlan
= hns3_ndo_set_vf_vlan
,
1553 /* hns3_probe - Device initialization routine
1554 * @pdev: PCI device information struct
1555 * @ent: entry in hns3_pci_tbl
1557 * hns3_probe initializes a PF identified by a pci_dev structure.
1558 * The OS initialization, configuring of the PF private structure,
1559 * and a hardware reset occur.
1561 * Returns 0 on success, negative on failure
1563 static int hns3_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
1565 struct hnae3_ae_dev
*ae_dev
;
1568 ae_dev
= devm_kzalloc(&pdev
->dev
, sizeof(*ae_dev
),
1575 ae_dev
->pdev
= pdev
;
1576 ae_dev
->flag
= ent
->driver_data
;
1577 ae_dev
->dev_type
= HNAE3_DEV_KNIC
;
1578 pci_set_drvdata(pdev
, ae_dev
);
1580 return hnae3_register_ae_dev(ae_dev
);
1583 /* hns3_remove - Device removal routine
1584 * @pdev: PCI device information struct
1586 static void hns3_remove(struct pci_dev
*pdev
)
1588 struct hnae3_ae_dev
*ae_dev
= pci_get_drvdata(pdev
);
1590 hnae3_unregister_ae_dev(ae_dev
);
1592 devm_kfree(&pdev
->dev
, ae_dev
);
1594 pci_set_drvdata(pdev
, NULL
);
1597 static struct pci_driver hns3_driver
= {
1598 .name
= hns3_driver_name
,
1599 .id_table
= hns3_pci_tbl
,
1600 .probe
= hns3_probe
,
1601 .remove
= hns3_remove
,
1604 /* set default feature to hns3 */
1605 static void hns3_set_default_feature(struct net_device
*netdev
)
1607 struct hnae3_handle
*h
= hns3_get_handle(netdev
);
1609 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
1611 netdev
->hw_enc_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1612 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1613 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1614 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1615 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1617 netdev
->hw_enc_features
|= NETIF_F_TSO_MANGLEID
;
1619 netdev
->gso_partial_features
|= NETIF_F_GSO_GRE_CSUM
;
1621 netdev
->features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1622 NETIF_F_HW_VLAN_CTAG_FILTER
|
1623 NETIF_F_HW_VLAN_CTAG_TX
| NETIF_F_HW_VLAN_CTAG_RX
|
1624 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1625 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1626 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1627 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1629 netdev
->vlan_features
|=
1630 NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
| NETIF_F_RXCSUM
|
1631 NETIF_F_SG
| NETIF_F_GSO
| NETIF_F_GRO
|
1632 NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1633 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1634 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1636 netdev
->hw_features
|= NETIF_F_IP_CSUM
| NETIF_F_IPV6_CSUM
|
1637 NETIF_F_HW_VLAN_CTAG_TX
|
1638 NETIF_F_RXCSUM
| NETIF_F_SG
| NETIF_F_GSO
|
1639 NETIF_F_GRO
| NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_GSO_GRE
|
1640 NETIF_F_GSO_GRE_CSUM
| NETIF_F_GSO_UDP_TUNNEL
|
1641 NETIF_F_GSO_UDP_TUNNEL_CSUM
;
1643 if (!(h
->flags
& HNAE3_SUPPORT_VF
))
1644 netdev
->hw_features
|=
1645 NETIF_F_HW_VLAN_CTAG_FILTER
| NETIF_F_HW_VLAN_CTAG_RX
;
1648 static int hns3_alloc_buffer(struct hns3_enet_ring
*ring
,
1649 struct hns3_desc_cb
*cb
)
1651 unsigned int order
= hnae_page_order(ring
);
1654 p
= dev_alloc_pages(order
);
1659 cb
->page_offset
= 0;
1661 cb
->buf
= page_address(p
);
1662 cb
->length
= hnae_page_size(ring
);
1663 cb
->type
= DESC_TYPE_PAGE
;
1668 static void hns3_free_buffer(struct hns3_enet_ring
*ring
,
1669 struct hns3_desc_cb
*cb
)
1671 if (cb
->type
== DESC_TYPE_SKB
)
1672 dev_kfree_skb_any((struct sk_buff
*)cb
->priv
);
1673 else if (!HNAE3_IS_TX_RING(ring
))
1674 put_page((struct page
*)cb
->priv
);
1675 memset(cb
, 0, sizeof(*cb
));
1678 static int hns3_map_buffer(struct hns3_enet_ring
*ring
, struct hns3_desc_cb
*cb
)
1680 cb
->dma
= dma_map_page(ring_to_dev(ring
), cb
->priv
, 0,
1681 cb
->length
, ring_to_dma_dir(ring
));
1683 if (dma_mapping_error(ring_to_dev(ring
), cb
->dma
))
1689 static void hns3_unmap_buffer(struct hns3_enet_ring
*ring
,
1690 struct hns3_desc_cb
*cb
)
1692 if (cb
->type
== DESC_TYPE_SKB
)
1693 dma_unmap_single(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1694 ring_to_dma_dir(ring
));
1696 dma_unmap_page(ring_to_dev(ring
), cb
->dma
, cb
->length
,
1697 ring_to_dma_dir(ring
));
1700 static void hns3_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1702 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1703 ring
->desc
[i
].addr
= 0;
1706 static void hns3_free_buffer_detach(struct hns3_enet_ring
*ring
, int i
)
1708 struct hns3_desc_cb
*cb
= &ring
->desc_cb
[i
];
1710 if (!ring
->desc_cb
[i
].dma
)
1713 hns3_buffer_detach(ring
, i
);
1714 hns3_free_buffer(ring
, cb
);
1717 static void hns3_free_buffers(struct hns3_enet_ring
*ring
)
1721 for (i
= 0; i
< ring
->desc_num
; i
++)
1722 hns3_free_buffer_detach(ring
, i
);
1725 /* free desc along with its attached buffer */
1726 static void hns3_free_desc(struct hns3_enet_ring
*ring
)
1728 hns3_free_buffers(ring
);
1730 dma_unmap_single(ring_to_dev(ring
), ring
->desc_dma_addr
,
1731 ring
->desc_num
* sizeof(ring
->desc
[0]),
1733 ring
->desc_dma_addr
= 0;
1738 static int hns3_alloc_desc(struct hns3_enet_ring
*ring
)
1740 int size
= ring
->desc_num
* sizeof(ring
->desc
[0]);
1742 ring
->desc
= kzalloc(size
, GFP_KERNEL
);
1746 ring
->desc_dma_addr
= dma_map_single(ring_to_dev(ring
), ring
->desc
,
1747 size
, DMA_BIDIRECTIONAL
);
1748 if (dma_mapping_error(ring_to_dev(ring
), ring
->desc_dma_addr
)) {
1749 ring
->desc_dma_addr
= 0;
1758 static int hns3_reserve_buffer_map(struct hns3_enet_ring
*ring
,
1759 struct hns3_desc_cb
*cb
)
1763 ret
= hns3_alloc_buffer(ring
, cb
);
1767 ret
= hns3_map_buffer(ring
, cb
);
1774 hns3_free_buffer(ring
, cb
);
1779 static int hns3_alloc_buffer_attach(struct hns3_enet_ring
*ring
, int i
)
1781 int ret
= hns3_reserve_buffer_map(ring
, &ring
->desc_cb
[i
]);
1786 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1791 /* Allocate memory for raw pkg, and map with dma */
1792 static int hns3_alloc_ring_buffers(struct hns3_enet_ring
*ring
)
1796 for (i
= 0; i
< ring
->desc_num
; i
++) {
1797 ret
= hns3_alloc_buffer_attach(ring
, i
);
1799 goto out_buffer_fail
;
1805 for (j
= i
- 1; j
>= 0; j
--)
1806 hns3_free_buffer_detach(ring
, j
);
1810 /* detach a in-used buffer and replace with a reserved one */
1811 static void hns3_replace_buffer(struct hns3_enet_ring
*ring
, int i
,
1812 struct hns3_desc_cb
*res_cb
)
1814 hns3_unmap_buffer(ring
, &ring
->desc_cb
[i
]);
1815 ring
->desc_cb
[i
] = *res_cb
;
1816 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
);
1819 static void hns3_reuse_buffer(struct hns3_enet_ring
*ring
, int i
)
1821 ring
->desc_cb
[i
].reuse_flag
= 0;
1822 ring
->desc
[i
].addr
= cpu_to_le64(ring
->desc_cb
[i
].dma
1823 + ring
->desc_cb
[i
].page_offset
);
1826 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring
*ring
, int *bytes
,
1829 struct hns3_desc_cb
*desc_cb
= &ring
->desc_cb
[ring
->next_to_clean
];
1831 (*pkts
) += (desc_cb
->type
== DESC_TYPE_SKB
);
1832 (*bytes
) += desc_cb
->length
;
1833 /* desc_cb will be cleaned, after hnae_free_buffer_detach*/
1834 hns3_free_buffer_detach(ring
, ring
->next_to_clean
);
1836 ring_ptr_move_fw(ring
, next_to_clean
);
1839 static int is_valid_clean_head(struct hns3_enet_ring
*ring
, int h
)
1841 int u
= ring
->next_to_use
;
1842 int c
= ring
->next_to_clean
;
1844 if (unlikely(h
> ring
->desc_num
))
1847 return u
> c
? (h
> c
&& h
<= u
) : (h
> c
|| h
<= u
);
1850 bool hns3_clean_tx_ring(struct hns3_enet_ring
*ring
, int budget
)
1852 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
1853 struct netdev_queue
*dev_queue
;
1857 head
= readl_relaxed(ring
->tqp
->io_base
+ HNS3_RING_TX_RING_HEAD_REG
);
1858 rmb(); /* Make sure head is ready before touch any data */
1860 if (is_ring_empty(ring
) || head
== ring
->next_to_clean
)
1861 return true; /* no data to poll */
1863 if (!is_valid_clean_head(ring
, head
)) {
1864 netdev_err(netdev
, "wrong head (%d, %d-%d)\n", head
,
1865 ring
->next_to_use
, ring
->next_to_clean
);
1867 u64_stats_update_begin(&ring
->syncp
);
1868 ring
->stats
.io_err_cnt
++;
1869 u64_stats_update_end(&ring
->syncp
);
1875 while (head
!= ring
->next_to_clean
&& budget
) {
1876 hns3_nic_reclaim_one_desc(ring
, &bytes
, &pkts
);
1877 /* Issue prefetch for next Tx descriptor */
1878 prefetch(&ring
->desc_cb
[ring
->next_to_clean
]);
1882 ring
->tqp_vector
->tx_group
.total_bytes
+= bytes
;
1883 ring
->tqp_vector
->tx_group
.total_packets
+= pkts
;
1885 u64_stats_update_begin(&ring
->syncp
);
1886 ring
->stats
.tx_bytes
+= bytes
;
1887 ring
->stats
.tx_pkts
+= pkts
;
1888 u64_stats_update_end(&ring
->syncp
);
1890 dev_queue
= netdev_get_tx_queue(netdev
, ring
->tqp
->tqp_index
);
1891 netdev_tx_completed_queue(dev_queue
, pkts
, bytes
);
1893 if (unlikely(pkts
&& netif_carrier_ok(netdev
) &&
1894 (ring_space(ring
) > HNS3_MAX_BD_PER_PKT
))) {
1895 /* Make sure that anybody stopping the queue after this
1896 * sees the new next_to_clean.
1899 if (netif_tx_queue_stopped(dev_queue
)) {
1900 netif_tx_wake_queue(dev_queue
);
1901 ring
->stats
.restart_queue
++;
1908 static int hns3_desc_unused(struct hns3_enet_ring
*ring
)
1910 int ntc
= ring
->next_to_clean
;
1911 int ntu
= ring
->next_to_use
;
1913 return ((ntc
>= ntu
) ? 0 : ring
->desc_num
) + ntc
- ntu
;
1917 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring
*ring
, int cleand_count
)
1919 struct hns3_desc_cb
*desc_cb
;
1920 struct hns3_desc_cb res_cbs
;
1923 for (i
= 0; i
< cleand_count
; i
++) {
1924 desc_cb
= &ring
->desc_cb
[ring
->next_to_use
];
1925 if (desc_cb
->reuse_flag
) {
1926 u64_stats_update_begin(&ring
->syncp
);
1927 ring
->stats
.reuse_pg_cnt
++;
1928 u64_stats_update_end(&ring
->syncp
);
1930 hns3_reuse_buffer(ring
, ring
->next_to_use
);
1932 ret
= hns3_reserve_buffer_map(ring
, &res_cbs
);
1934 u64_stats_update_begin(&ring
->syncp
);
1935 ring
->stats
.sw_err_cnt
++;
1936 u64_stats_update_end(&ring
->syncp
);
1938 netdev_err(ring
->tqp
->handle
->kinfo
.netdev
,
1939 "hnae reserve buffer map failed.\n");
1942 hns3_replace_buffer(ring
, ring
->next_to_use
, &res_cbs
);
1945 ring_ptr_move_fw(ring
, next_to_use
);
1948 wmb(); /* Make all data has been write before submit */
1949 writel_relaxed(i
, ring
->tqp
->io_base
+ HNS3_RING_RX_RING_HEAD_REG
);
1952 /* hns3_nic_get_headlen - determine size of header for LRO/GRO
1953 * @data: pointer to the start of the headers
1954 * @max: total length of section to find headers in
1956 * This function is meant to determine the length of headers that will
1957 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1958 * motivation of doing this is to only perform one pull for IPv4 TCP
1959 * packets so that we can do basic things like calculating the gso_size
1960 * based on the average data per packet.
1962 static unsigned int hns3_nic_get_headlen(unsigned char *data
, u32 flag
,
1963 unsigned int max_size
)
1965 unsigned char *network
;
1968 /* This should never happen, but better safe than sorry */
1969 if (max_size
< ETH_HLEN
)
1972 /* Initialize network frame pointer */
1975 /* Set first protocol and move network header forward */
1976 network
+= ETH_HLEN
;
1978 /* Handle any vlan tag if present */
1979 if (hnae_get_field(flag
, HNS3_RXD_VLAN_M
, HNS3_RXD_VLAN_S
)
1980 == HNS3_RX_FLAG_VLAN_PRESENT
) {
1981 if ((typeof(max_size
))(network
- data
) > (max_size
- VLAN_HLEN
))
1984 network
+= VLAN_HLEN
;
1987 /* Handle L3 protocols */
1988 if (hnae_get_field(flag
, HNS3_RXD_L3ID_M
, HNS3_RXD_L3ID_S
)
1989 == HNS3_RX_FLAG_L3ID_IPV4
) {
1990 if ((typeof(max_size
))(network
- data
) >
1991 (max_size
- sizeof(struct iphdr
)))
1994 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1995 hlen
= (network
[0] & 0x0F) << 2;
1997 /* Verify hlen meets minimum size requirements */
1998 if (hlen
< sizeof(struct iphdr
))
1999 return network
- data
;
2001 /* Record next protocol if header is present */
2002 } else if (hnae_get_field(flag
, HNS3_RXD_L3ID_M
, HNS3_RXD_L3ID_S
)
2003 == HNS3_RX_FLAG_L3ID_IPV6
) {
2004 if ((typeof(max_size
))(network
- data
) >
2005 (max_size
- sizeof(struct ipv6hdr
)))
2008 /* Record next protocol */
2009 hlen
= sizeof(struct ipv6hdr
);
2011 return network
- data
;
2014 /* Relocate pointer to start of L4 header */
2017 /* Finally sort out TCP/UDP */
2018 if (hnae_get_field(flag
, HNS3_RXD_L4ID_M
, HNS3_RXD_L4ID_S
)
2019 == HNS3_RX_FLAG_L4ID_TCP
) {
2020 if ((typeof(max_size
))(network
- data
) >
2021 (max_size
- sizeof(struct tcphdr
)))
2024 /* Access doff as a u8 to avoid unaligned access on ia64 */
2025 hlen
= (network
[12] & 0xF0) >> 2;
2027 /* Verify hlen meets minimum size requirements */
2028 if (hlen
< sizeof(struct tcphdr
))
2029 return network
- data
;
2032 } else if (hnae_get_field(flag
, HNS3_RXD_L4ID_M
, HNS3_RXD_L4ID_S
)
2033 == HNS3_RX_FLAG_L4ID_UDP
) {
2034 if ((typeof(max_size
))(network
- data
) >
2035 (max_size
- sizeof(struct udphdr
)))
2038 network
+= sizeof(struct udphdr
);
2041 /* If everything has gone correctly network should be the
2042 * data section of the packet and will be the end of the header.
2043 * If not then it probably represents the end of the last recognized
2046 if ((typeof(max_size
))(network
- data
) < max_size
)
2047 return network
- data
;
2052 static void hns3_nic_reuse_page(struct sk_buff
*skb
, int i
,
2053 struct hns3_enet_ring
*ring
, int pull_len
,
2054 struct hns3_desc_cb
*desc_cb
)
2056 struct hns3_desc
*desc
;
2061 twobufs
= ((PAGE_SIZE
< 8192) &&
2062 hnae_buf_size(ring
) == HNS3_BUFFER_SIZE_2048
);
2064 desc
= &ring
->desc
[ring
->next_to_clean
];
2065 size
= le16_to_cpu(desc
->rx
.size
);
2068 truesize
= hnae_buf_size(ring
);
2070 truesize
= ALIGN(size
, L1_CACHE_BYTES
);
2071 last_offset
= hnae_page_size(ring
) - hnae_buf_size(ring
);
2074 skb_add_rx_frag(skb
, i
, desc_cb
->priv
, desc_cb
->page_offset
+ pull_len
,
2075 size
- pull_len
, truesize
- pull_len
);
2077 /* Avoid re-using remote pages,flag default unreuse */
2078 if (unlikely(page_to_nid(desc_cb
->priv
) != numa_node_id()))
2082 /* If we are only owner of page we can reuse it */
2083 if (likely(page_count(desc_cb
->priv
) == 1)) {
2084 /* Flip page offset to other buffer */
2085 desc_cb
->page_offset
^= truesize
;
2087 desc_cb
->reuse_flag
= 1;
2088 /* bump ref count on page before it is given*/
2089 get_page(desc_cb
->priv
);
2094 /* Move offset up to the next cache line */
2095 desc_cb
->page_offset
+= truesize
;
2097 if (desc_cb
->page_offset
<= last_offset
) {
2098 desc_cb
->reuse_flag
= 1;
2099 /* Bump ref count on page before it is given*/
2100 get_page(desc_cb
->priv
);
2104 static void hns3_rx_checksum(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
,
2105 struct hns3_desc
*desc
)
2107 struct net_device
*netdev
= ring
->tqp
->handle
->kinfo
.netdev
;
2108 int l3_type
, l4_type
;
2113 bd_base_info
= le32_to_cpu(desc
->rx
.bd_base_info
);
2114 l234info
= le32_to_cpu(desc
->rx
.l234_info
);
2116 skb
->ip_summed
= CHECKSUM_NONE
;
2118 skb_checksum_none_assert(skb
);
2120 if (!(netdev
->features
& NETIF_F_RXCSUM
))
2123 /* check if hardware has done checksum */
2124 if (!hnae_get_bit(bd_base_info
, HNS3_RXD_L3L4P_B
))
2127 if (unlikely(hnae_get_bit(l234info
, HNS3_RXD_L3E_B
) ||
2128 hnae_get_bit(l234info
, HNS3_RXD_L4E_B
) ||
2129 hnae_get_bit(l234info
, HNS3_RXD_OL3E_B
) ||
2130 hnae_get_bit(l234info
, HNS3_RXD_OL4E_B
))) {
2131 netdev_err(netdev
, "L3/L4 error pkt\n");
2132 u64_stats_update_begin(&ring
->syncp
);
2133 ring
->stats
.l3l4_csum_err
++;
2134 u64_stats_update_end(&ring
->syncp
);
2139 l3_type
= hnae_get_field(l234info
, HNS3_RXD_L3ID_M
,
2141 l4_type
= hnae_get_field(l234info
, HNS3_RXD_L4ID_M
,
2144 ol4_type
= hnae_get_field(l234info
, HNS3_RXD_OL4ID_M
, HNS3_RXD_OL4ID_S
);
2146 case HNS3_OL4_TYPE_MAC_IN_UDP
:
2147 case HNS3_OL4_TYPE_NVGRE
:
2148 skb
->csum_level
= 1;
2149 case HNS3_OL4_TYPE_NO_TUN
:
2150 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2151 if (l3_type
== HNS3_L3_TYPE_IPV4
||
2152 (l3_type
== HNS3_L3_TYPE_IPV6
&&
2153 (l4_type
== HNS3_L4_TYPE_UDP
||
2154 l4_type
== HNS3_L4_TYPE_TCP
||
2155 l4_type
== HNS3_L4_TYPE_SCTP
)))
2156 skb
->ip_summed
= CHECKSUM_UNNECESSARY
;
2161 static void hns3_rx_skb(struct hns3_enet_ring
*ring
, struct sk_buff
*skb
)
2163 napi_gro_receive(&ring
->tqp_vector
->napi
, skb
);
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
                             struct sk_buff **out_skb, int *out_bnum)
{
    struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
    struct hns3_desc_cb *desc_cb;
    struct hns3_desc *desc;
    struct sk_buff *skb;
    unsigned char *va;
    u32 bd_base_info;
    u32 l234info;
    int pull_len;
    int length;
    int bnum;

    desc = &ring->desc[ring->next_to_clean];
    desc_cb = &ring->desc_cb[ring->next_to_clean];

    length = le16_to_cpu(desc->rx.pkt_len);
    bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
    l234info = le32_to_cpu(desc->rx.l234_info);

    /* Check valid BD */
    if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
        return -EFAULT;

    va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

    /* Prefetch first cache line of first page
     * Idea is to cache few bytes of the header of the packet. Our L1 Cache
     * line size is 64B so need to prefetch twice to make it 128B. But in
     * actual we can have greater size of caches with 128B Level 1 cache
     * lines. In such a case, single fetch would suffice to cache in the
     * relevant part of the header.
     */
    prefetch(va);
#if L1_CACHE_BYTES < 128
    prefetch(va + L1_CACHE_BYTES);
#endif

    skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
                                    HNS3_RX_HEAD_SIZE);
    if (unlikely(!skb)) {
        netdev_err(netdev, "alloc rx skb fail\n");

        u64_stats_update_begin(&ring->syncp);
        ring->stats.sw_err_cnt++;
        u64_stats_update_end(&ring->syncp);

        return -ENOMEM;
    }

    prefetchw(skb->data);

    /* Based on hw strategy, the tag offloaded will be stored at
     * ot_vlan_tag in two layer tag case, and stored at vlan_tag
     * in one layer tag case.
     */
    if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
        u16 vlan_tag;

        vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
        if (!(vlan_tag & VLAN_VID_MASK))
            vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
        if (vlan_tag & VLAN_VID_MASK)
            __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                   vlan_tag);
    }

    bnum = 1;
    if (length <= HNS3_RX_HEAD_SIZE) {
        memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

        /* We can reuse buffer as-is, just make sure it is local */
        if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
            desc_cb->reuse_flag = 1;
        else /* This page cannot be reused so discard it */
            put_page(desc_cb->priv);

        ring_ptr_move_fw(ring, next_to_clean);
    } else {
        u64_stats_update_begin(&ring->syncp);
        ring->stats.seg_pkt_cnt++;
        u64_stats_update_end(&ring->syncp);

        pull_len = hns3_nic_get_headlen(va, l234info,
                                        HNS3_RX_HEAD_SIZE);
        memcpy(__skb_put(skb, pull_len), va,
               ALIGN(pull_len, sizeof(long)));

        hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
        ring_ptr_move_fw(ring, next_to_clean);

        while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
            desc = &ring->desc[ring->next_to_clean];
            desc_cb = &ring->desc_cb[ring->next_to_clean];
            bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
            hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
            ring_ptr_move_fw(ring, next_to_clean);
            bnum++;
        }
    }

    *out_bnum = bnum;

    if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
        netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
                   ((u64 *)desc)[0], ((u64 *)desc)[1]);
        u64_stats_update_begin(&ring->syncp);
        ring->stats.non_vld_descs++;
        u64_stats_update_end(&ring->syncp);

        dev_kfree_skb_any(skb);
        return -EINVAL;
    }

    if (unlikely((!desc->rx.pkt_len) ||
                 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
        netdev_err(netdev, "truncated pkt\n");
        u64_stats_update_begin(&ring->syncp);
        ring->stats.err_pkt_len++;
        u64_stats_update_end(&ring->syncp);

        dev_kfree_skb_any(skb);
        return -EFAULT;
    }

    if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
        netdev_err(netdev, "L2 error pkt\n");
        u64_stats_update_begin(&ring->syncp);
        ring->stats.l2_err++;
        u64_stats_update_end(&ring->syncp);

        dev_kfree_skb_any(skb);
        return -EFAULT;
    }

    u64_stats_update_begin(&ring->syncp);
    ring->stats.rx_pkts++;
    ring->stats.rx_bytes += skb->len;
    u64_stats_update_end(&ring->syncp);

    ring->tqp_vector->rx_group.total_bytes += skb->len;

    hns3_rx_checksum(ring, skb, desc);
    return 0;
}
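/* hns3_clean_rx_ring - Rx completion processing for one ring
 * @ring: ring to service
 * @budget: maximum number of packets to handle in this pass
 * @rx_fn: callback used to hand each completed skb to the stack
 *
 * Reads the number of pending buffer descriptors from hardware, loops
 * over them via hns3_handle_rx_bd() and refills Rx buffers in batches of
 * RCB_NOF_ALLOC_RX_BUFF_ONCE. Returns the number of packets received.
 */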
int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
                       void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
    struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
    int recv_pkts, recv_bds, clean_count, err;
    int unused_count = hns3_desc_unused(ring);
    struct sk_buff *skb = NULL;
    int num, bnum = 0;

    num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
    rmb(); /* Make sure num taken effect before the other data is touched */

    recv_pkts = 0, recv_bds = 0, clean_count = 0;
    num -= unused_count;

    while (recv_pkts < budget && recv_bds < num) {
        /* Reuse or realloc buffers */
        if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
            hns3_nic_alloc_rx_buffers(ring,
                                      clean_count + unused_count);
            clean_count = 0;
            unused_count = hns3_desc_unused(ring);
        }

        /* Poll one pkt */
        err = hns3_handle_rx_bd(ring, &skb, &bnum);
        if (unlikely(!skb)) /* This fault cannot be repaired */
            goto out;

        recv_bds += bnum;
        clean_count += bnum;
        if (unlikely(err)) { /* Do jump the err */
            recv_pkts++;
            continue;
        }

        /* Do update ip stack process */
        skb->protocol = eth_type_trans(skb, netdev);
        rx_fn(ring, skb);
        recv_pkts++;
    }

out:
    /* Make all data has been write before submit */
    if (clean_count + unused_count > 0)
        hns3_nic_alloc_rx_buffers(ring,
                                  clean_count + unused_count);

    return recv_pkts;
}
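/* Adaptive interrupt coalescing: based on the bytes and packets handled
 * since the last adjustment, each ring group is classified into a flow
 * level (low/mid/high/ultra) and the interrupt GL value is moved to the
 * matching preset. hns3_get_new_int_gl() returns true only when the GL
 * value actually changed, so the caller knows it must reprogram hardware.
 */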
static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
#define HNS3_RX_ULTRA_PACKET_RATE 40000
    enum hns3_flow_level_range new_flow_level;
    struct hns3_enet_tqp_vector *tqp_vector;
    int packets_per_secs;
    int bytes_per_usecs;
    u16 new_int_gl;
    int usecs;

    if (!ring_group->int_gl)
        return false;

    if (ring_group->total_packets == 0) {
        ring_group->int_gl = HNS3_INT_GL_50K;
        ring_group->flow_level = HNS3_FLOW_LOW;
        return false;
    }

    /* Simple throttlerate management
     * 0-10MB/s    lower   (50000 ints/s)
     * 10-20MB/s   middle  (20000 ints/s)
     * 20-1249MB/s high    (18000 ints/s)
     * > 40000pps  ultra   (8000 ints/s)
     */
    new_flow_level = ring_group->flow_level;
    new_int_gl = ring_group->int_gl;
    tqp_vector = ring_group->ring->tqp_vector;
    usecs = (ring_group->int_gl << 1);
    bytes_per_usecs = ring_group->total_bytes / usecs;
    /* 1000000 microseconds */
    packets_per_secs = ring_group->total_packets * 1000000 / usecs;

    switch (new_flow_level) {
    case HNS3_FLOW_LOW:
        if (bytes_per_usecs > 10)
            new_flow_level = HNS3_FLOW_MID;
        break;
    case HNS3_FLOW_MID:
        if (bytes_per_usecs > 20)
            new_flow_level = HNS3_FLOW_HIGH;
        else if (bytes_per_usecs <= 10)
            new_flow_level = HNS3_FLOW_LOW;
        break;
    case HNS3_FLOW_HIGH:
    case HNS3_FLOW_ULTRA:
    default:
        if (bytes_per_usecs <= 20)
            new_flow_level = HNS3_FLOW_MID;
        break;
    }

    if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
        (&tqp_vector->rx_group == ring_group))
        new_flow_level = HNS3_FLOW_ULTRA;

    switch (new_flow_level) {
    case HNS3_FLOW_LOW:
        new_int_gl = HNS3_INT_GL_50K;
        break;
    case HNS3_FLOW_MID:
        new_int_gl = HNS3_INT_GL_20K;
        break;
    case HNS3_FLOW_HIGH:
        new_int_gl = HNS3_INT_GL_18K;
        break;
    case HNS3_FLOW_ULTRA:
        new_int_gl = HNS3_INT_GL_8K;
        break;
    default:
        break;
    }

    ring_group->total_bytes = 0;
    ring_group->total_packets = 0;
    ring_group->flow_level = new_flow_level;
    if (new_int_gl != ring_group->int_gl) {
        ring_group->int_gl = new_int_gl;
        return true;
    }

    return false;
}

static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
    struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
    struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
    bool rx_update, tx_update;

    if (rx_group->gl_adapt_enable) {
        rx_update = hns3_get_new_int_gl(rx_group);
        if (rx_update)
            hns3_set_vector_coalesce_rx_gl(tqp_vector,
                                           rx_group->int_gl);
    }

    if (tx_group->gl_adapt_enable) {
        tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
        if (tx_update)
            hns3_set_vector_coalesce_tx_gl(tqp_vector,
                                           tx_group->int_gl);
    }
}
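/* NAPI poll routine shared by all TQP vectors: it first cleans every Tx
 * ring in the vector's tx_group, then splits the budget across the Rx
 * rings. Once all work fits within the budget, NAPI is completed, the
 * adaptive GL values are refreshed and the vector interrupt is unmasked.
 */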
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
    struct hns3_enet_ring *ring;
    int rx_pkt_total = 0;

    struct hns3_enet_tqp_vector *tqp_vector =
        container_of(napi, struct hns3_enet_tqp_vector, napi);
    bool clean_complete = true;
    int rx_budget;

    /* Since the actual Tx work is minimal, we can give the Tx a larger
     * budget and be more aggressive about cleaning up the Tx descriptors.
     */
    hns3_for_each_ring(ring, tqp_vector->tx_group) {
        if (!hns3_clean_tx_ring(ring, budget))
            clean_complete = false;
    }

    /* make sure rx ring budget not smaller than 1 */
    rx_budget = max(budget / tqp_vector->num_tqps, 1);

    hns3_for_each_ring(ring, tqp_vector->rx_group) {
        int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
                                            hns3_rx_skb);

        if (rx_cleaned >= rx_budget)
            clean_complete = false;

        rx_pkt_total += rx_cleaned;
    }

    tqp_vector->rx_group.total_packets += rx_pkt_total;

    if (!clean_complete)
        return budget;

    napi_complete(napi);
    hns3_update_new_int_gl(tqp_vector);
    hns3_mask_vector_irq(tqp_vector, 1);

    return rx_pkt_total;
}
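/* Build the hnae3_ring_chain_node list describing every Tx and Rx ring
 * attached to this TQP vector. The chain is what the AE layer consumes in
 * map_ring_to_vector()/unmap_ring_from_vector(): Tx rings are linked
 * first, then Rx rings, each node tagged with its ring type and interrupt
 * GL index.
 */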
static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
                                      struct hnae3_ring_chain_node *head)
{
    struct pci_dev *pdev = tqp_vector->handle->pdev;
    struct hnae3_ring_chain_node *cur_chain = head;
    struct hnae3_ring_chain_node *chain;
    struct hns3_enet_ring *tx_ring;
    struct hns3_enet_ring *rx_ring;

    tx_ring = tqp_vector->tx_group.ring;
    if (tx_ring) {
        cur_chain->tqp_index = tx_ring->tqp->tqp_index;
        hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
                     HNAE3_RING_TYPE_TX);
        hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

        cur_chain->next = NULL;

        while (tx_ring->next) {
            tx_ring = tx_ring->next;

            chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
                                 GFP_KERNEL);
            if (!chain)
                return -ENOMEM;

            cur_chain->next = chain;
            chain->tqp_index = tx_ring->tqp->tqp_index;
            hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
                         HNAE3_RING_TYPE_TX);
            hnae_set_field(chain->int_gl_idx,
                           HNAE3_RING_GL_IDX_M,
                           HNAE3_RING_GL_IDX_S,
                           HNAE3_RING_GL_TX);

            cur_chain = chain;
        }
    }

    rx_ring = tqp_vector->rx_group.ring;
    if (!tx_ring && rx_ring) {
        cur_chain->next = NULL;
        cur_chain->tqp_index = rx_ring->tqp->tqp_index;
        hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
                     HNAE3_RING_TYPE_RX);
        hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

        rx_ring = rx_ring->next;
    }

    while (rx_ring) {
        chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
        if (!chain)
            return -ENOMEM;

        cur_chain->next = chain;
        chain->tqp_index = rx_ring->tqp->tqp_index;
        hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
                     HNAE3_RING_TYPE_RX);
        hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
                       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

        cur_chain = chain;

        rx_ring = rx_ring->next;
    }

    return 0;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
                                        struct hnae3_ring_chain_node *head)
{
    struct pci_dev *pdev = tqp_vector->handle->pdev;
    struct hnae3_ring_chain_node *chain_tmp, *chain;

    chain = head->next;

    while (chain) {
        chain_tmp = chain->next;
        devm_kfree(&pdev->dev, chain);
        chain = chain_tmp;
    }
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
                                   struct hns3_enet_ring *ring)
{
    ring->next = group->ring;
    group->ring = ring;
}
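/* Allocate the TQP interrupt vectors, distribute the Tx/Rx rings across
 * them round-robin, program the default GL/RL coalesce settings, map each
 * vector's ring chain through the AE ops and register one NAPI context
 * per vector.
 */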
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
    struct hnae3_ring_chain_node vector_ring_chain;
    struct hnae3_handle *h = priv->ae_handle;
    struct hns3_enet_tqp_vector *tqp_vector;
    struct hnae3_vector_info *vector;
    struct pci_dev *pdev = h->pdev;
    u16 tqp_num = h->kinfo.num_tqps;
    u16 vector_num;
    int ret = 0;
    u16 i;

    /* RSS size, cpu online and vector_num should be the same */
    /* Should consider 2p/4p later */
    vector_num = min_t(u16, num_online_cpus(), tqp_num);
    vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
                          GFP_KERNEL);
    if (!vector)
        return -ENOMEM;

    vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

    priv->vector_num = vector_num;
    priv->tqp_vector = (struct hns3_enet_tqp_vector *)
        devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
                     GFP_KERNEL);
    if (!priv->tqp_vector)
        return -ENOMEM;

    for (i = 0; i < tqp_num; i++) {
        u16 vector_i = i % vector_num;

        tqp_vector = &priv->tqp_vector[vector_i];

        hns3_add_ring_to_group(&tqp_vector->tx_group,
                               priv->ring_data[i].ring);

        hns3_add_ring_to_group(&tqp_vector->rx_group,
                               priv->ring_data[i + tqp_num].ring);

        tqp_vector->idx = vector_i;
        tqp_vector->mask_addr = vector[vector_i].io_addr;
        tqp_vector->vector_irq = vector[vector_i].vector;
        tqp_vector->num_tqps++;

        priv->ring_data[i].ring->tqp_vector = tqp_vector;
        priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
    }

    for (i = 0; i < vector_num; i++) {
        tqp_vector = &priv->tqp_vector[i];

        tqp_vector->rx_group.total_bytes = 0;
        tqp_vector->rx_group.total_packets = 0;
        tqp_vector->tx_group.total_bytes = 0;
        tqp_vector->tx_group.total_packets = 0;
        hns3_vector_gl_rl_init(tqp_vector, priv);
        tqp_vector->handle = h;

        ret = hns3_get_vector_ring_chain(tqp_vector,
                                         &vector_ring_chain);
        if (ret)
            goto out;

        ret = h->ae_algo->ops->map_ring_to_vector(h,
            tqp_vector->vector_irq, &vector_ring_chain);
        if (ret)
            goto out;

        hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

        netif_napi_add(priv->netdev, &tqp_vector->napi,
                       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
    }

out:
    devm_kfree(&pdev->dev, vector);
    return ret;
}

static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
    struct hnae3_ring_chain_node vector_ring_chain;
    struct hnae3_handle *h = priv->ae_handle;
    struct hns3_enet_tqp_vector *tqp_vector;
    struct pci_dev *pdev = h->pdev;
    int i, ret;

    for (i = 0; i < priv->vector_num; i++) {
        tqp_vector = &priv->tqp_vector[i];

        ret = hns3_get_vector_ring_chain(tqp_vector,
                                         &vector_ring_chain);
        if (ret)
            return ret;

        ret = h->ae_algo->ops->unmap_ring_from_vector(h,
            tqp_vector->vector_irq, &vector_ring_chain);
        if (ret)
            return ret;

        hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

        if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
            (void)irq_set_affinity_hint(
                priv->tqp_vector[i].vector_irq,
                NULL);
            free_irq(priv->tqp_vector[i].vector_irq,
                     &priv->tqp_vector[i]);
        }

        priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;

        netif_napi_del(&priv->tqp_vector[i].napi);
    }

    devm_kfree(&pdev->dev, priv->tqp_vector);

    return 0;
}
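/* Allocate one hns3_enet_ring for a hardware queue and hook it into
 * priv->ring_data: Tx rings occupy the first num_tqps slots and Rx rings
 * the following num_tqps slots, each pointing at the queue's Tx or Rx
 * register window.
 */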
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
                             int ring_type)
{
    struct hns3_nic_ring_data *ring_data = priv->ring_data;
    int queue_num = priv->ae_handle->kinfo.num_tqps;
    struct pci_dev *pdev = priv->ae_handle->pdev;
    struct hns3_enet_ring *ring;

    ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
    if (!ring)
        return -ENOMEM;

    if (ring_type == HNAE3_RING_TYPE_TX) {
        ring_data[q->tqp_index].ring = ring;
        ring_data[q->tqp_index].queue_index = q->tqp_index;
        ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
    } else {
        ring_data[q->tqp_index + queue_num].ring = ring;
        ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
        ring->io_base = q->io_base;
    }

    hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

    ring->tqp = q;
    ring->desc = NULL;
    ring->desc_cb = NULL;
    ring->dev = priv->dev;
    ring->desc_dma_addr = 0;
    ring->buf_size = q->buf_size;
    ring->desc_num = q->desc_num;
    ring->next_to_use = 0;
    ring->next_to_clean = 0;

    return 0;
}

static int hns3_queue_to_ring(struct hnae3_queue *tqp,
                              struct hns3_nic_priv *priv)
{
    int ret;

    ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
    if (ret)
        return ret;

    ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
    if (ret)
        return ret;

    return 0;
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
    struct hnae3_handle *h = priv->ae_handle;
    struct pci_dev *pdev = h->pdev;
    int i, ret;

    priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
                                   sizeof(*priv->ring_data) * 2,
                                   GFP_KERNEL);
    if (!priv->ring_data)
        return -ENOMEM;

    for (i = 0; i < h->kinfo.num_tqps; i++) {
        ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
        if (ret)
            goto err;
    }

    return 0;
err:
    devm_kfree(&pdev->dev, priv->ring_data);
    return ret;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
    struct hnae3_handle *h = priv->ae_handle;
    int i;

    for (i = 0; i < h->kinfo.num_tqps; i++) {
        devm_kfree(priv->dev, priv->ring_data[i].ring);
        devm_kfree(priv->dev,
                   priv->ring_data[i + h->kinfo.num_tqps].ring);
    }
    devm_kfree(priv->dev, priv->ring_data);
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
    int ret;

    if (ring->desc_num <= 0 || ring->buf_size <= 0)
        return -EINVAL;

    ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
                            GFP_KERNEL);
    if (!ring->desc_cb) {
        ret = -ENOMEM;
        goto out;
    }

    ret = hns3_alloc_desc(ring);
    if (ret)
        goto out_with_desc_cb;

    if (!HNAE3_IS_TX_RING(ring)) {
        ret = hns3_alloc_ring_buffers(ring);
        if (ret)
            goto out_with_desc;
    }

    return 0;

out_with_desc:
    hns3_free_desc(ring);
out_with_desc_cb:
    kfree(ring->desc_cb);
    ring->desc_cb = NULL;
out:
    return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
    hns3_free_desc(ring);
    kfree(ring->desc_cb);
    ring->desc_cb = NULL;
    ring->next_to_clean = 0;
    ring->next_to_use = 0;
}

static int hns3_buf_size2type(u32 buf_size)
{
    int bd_size_type;

    switch (buf_size) {
    case 512:
        bd_size_type = HNS3_BD_SIZE_512_TYPE;
        break;
    case 1024:
        bd_size_type = HNS3_BD_SIZE_1024_TYPE;
        break;
    case 2048:
        bd_size_type = HNS3_BD_SIZE_2048_TYPE;
        break;
    case 4096:
        bd_size_type = HNS3_BD_SIZE_4096_TYPE;
        break;
    default:
        bd_size_type = HNS3_BD_SIZE_2048_TYPE;
    }

    return bd_size_type;
}
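/* Program the per-queue ring registers: base address (written as low and
 * high 32-bit halves), buffer-descriptor length type and descriptor count
 * for either the Rx or the Tx side of the queue.
 */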
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
    dma_addr_t dma = ring->desc_dma_addr;
    struct hnae3_queue *q = ring->tqp;

    if (!HNAE3_IS_TX_RING(ring)) {
        hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
                       (u32)dma);
        hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
                       (u32)((dma >> 31) >> 1));

        hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
                       hns3_buf_size2type(ring->buf_size));
        hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
                       ring->desc_num / 8 - 1);
    } else {
        hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
                       (u32)dma);
        hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
                       (u32)((dma >> 31) >> 1));

        hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
                       hns3_buf_size2type(ring->buf_size));
        hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
                       ring->desc_num / 8 - 1);
    }
}
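/* Allocate descriptor memory and buffers for every Tx and Rx ring (there
 * are 2 * num_tqps rings in ring_data), program the ring registers and
 * initialise the per-ring statistics seqcounts, rolling back any rings
 * already set up if an allocation fails.
 */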
int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
    struct hnae3_handle *h = priv->ae_handle;
    int ring_num = h->kinfo.num_tqps * 2;
    int i, j;
    int ret;

    for (i = 0; i < ring_num; i++) {
        ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
        if (ret) {
            dev_err(priv->dev,
                    "Alloc ring memory fail! ret=%d\n", ret);
            goto out_when_alloc_ring_memory;
        }

        hns3_init_ring_hw(priv->ring_data[i].ring);

        u64_stats_init(&priv->ring_data[i].ring->syncp);
    }

    return 0;

out_when_alloc_ring_memory:
    for (j = i - 1; j >= 0; j--)
        hns3_fini_ring(priv->ring_data[j].ring);

    return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
    struct hnae3_handle *h = priv->ae_handle;
    int i;

    for (i = 0; i < h->kinfo.num_tqps; i++) {
        if (h->ae_algo->ops->reset_queue)
            h->ae_algo->ops->reset_queue(h, i);

        hns3_fini_ring(priv->ring_data[i].ring);
        devm_kfree(priv->dev, priv->ring_data[i].ring);
        hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
        devm_kfree(priv->dev,
                   priv->ring_data[i + h->kinfo.num_tqps].ring);
    }
    devm_kfree(priv->dev, priv->ring_data);

    return 0;
}

/* Set the MAC address if one is configured, or leave it to the AE driver */
static void hns3_init_mac_addr(struct net_device *netdev)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hnae3_handle *h = priv->ae_handle;
    u8 mac_addr_temp[ETH_ALEN];

    if (h->ae_algo->ops->get_mac_addr) {
        h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
        ether_addr_copy(netdev->dev_addr, mac_addr_temp);
    }

    /* Check if the MAC address is valid, if not get a random one */
    if (!is_valid_ether_addr(netdev->dev_addr)) {
        eth_hw_addr_random(netdev);
        dev_warn(priv->dev, "using random MAC address %pM\n",
                 netdev->dev_addr);
    }

    if (h->ae_algo->ops->set_mac_addr)
        h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
}

static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);

    if ((netdev->features & NETIF_F_TSO) ||
        (netdev->features & NETIF_F_TSO6)) {
        priv->ops.fill_desc = hns3_fill_desc_tso;
        priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
    } else {
        priv->ops.fill_desc = hns3_fill_desc;
        priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
    }
}
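/* Client init callback invoked by the hnae3 framework when a matching AE
 * device is bound: allocates the net_device, wires up the netdev/ethtool
 * ops and default features, builds the ring and vector data, and finally
 * registers the netdev with an upper MTU bound derived from HNS3_MAX_MTU.
 */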
static int hns3_client_init(struct hnae3_handle *handle)
{
    struct pci_dev *pdev = handle->pdev;
    struct hns3_nic_priv *priv;
    struct net_device *netdev;
    int ret;

    netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
                               handle->kinfo.num_tqps);
    if (!netdev)
        return -ENOMEM;

    priv = netdev_priv(netdev);
    priv->dev = &pdev->dev;
    priv->netdev = netdev;
    priv->ae_handle = handle;
    priv->last_reset_time = jiffies;
    priv->reset_level = HNAE3_FUNC_RESET;
    priv->tx_timeout_count = 0;

    handle->kinfo.netdev = netdev;
    handle->priv = (void *)priv;

    hns3_init_mac_addr(netdev);

    hns3_set_default_feature(netdev);

    netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
    netdev->priv_flags |= IFF_UNICAST_FLT;
    netdev->netdev_ops = &hns3_nic_netdev_ops;
    SET_NETDEV_DEV(netdev, &pdev->dev);
    hns3_ethtool_set_ops(netdev);
    hns3_nic_set_priv_ops(netdev);

    /* Carrier off reporting is important to ethtool even BEFORE open */
    netif_carrier_off(netdev);

    ret = hns3_get_ring_config(priv);
    if (ret) {
        ret = -ENOMEM;
        goto out_get_ring_cfg;
    }

    ret = hns3_nic_init_vector_data(priv);
    if (ret) {
        ret = -ENOMEM;
        goto out_init_vector_data;
    }

    ret = hns3_init_all_ring(priv);
    if (ret) {
        ret = -ENOMEM;
        goto out_init_ring_data;
    }

    ret = register_netdev(netdev);
    if (ret) {
        dev_err(priv->dev, "probe register netdev fail!\n");
        goto out_reg_netdev_fail;
    }

    hns3_dcbnl_setup(handle);

    /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
    netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

    return ret;

out_reg_netdev_fail:
out_init_ring_data:
    (void)hns3_nic_uninit_vector_data(priv);
    priv->ring_data = NULL;
out_init_vector_data:
out_get_ring_cfg:
    priv->ae_handle = NULL;
    free_netdev(netdev);
    return ret;
}

static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
    struct net_device *netdev = handle->kinfo.netdev;
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    int ret;

    if (netdev->reg_state != NETREG_UNINITIALIZED)
        unregister_netdev(netdev);

    ret = hns3_nic_uninit_vector_data(priv);
    if (ret)
        netdev_err(netdev, "uninit vector error\n");

    ret = hns3_uninit_all_ring(priv);
    if (ret)
        netdev_err(netdev, "uninit ring error\n");

    priv->ring_data = NULL;

    free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
    struct net_device *netdev = handle->kinfo.netdev;

    if (linkup) {
        netif_carrier_on(netdev);
        netif_tx_wake_all_queues(netdev);
        netdev_info(netdev, "link up\n");
    } else {
        netif_carrier_off(netdev);
        netif_tx_stop_all_queues(netdev);
        netdev_info(netdev, "link down\n");
    }
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
    struct hnae3_knic_private_info *kinfo = &handle->kinfo;
    struct net_device *ndev = kinfo->netdev;
    bool if_running;
    int ret;
    u8 i;

    if (tc > HNAE3_MAX_TC)
        return -EINVAL;

    if_running = netif_running(ndev);

    ret = netdev_set_num_tc(ndev, tc);
    if (ret)
        return ret;

    if (if_running)
        (void)hns3_nic_net_stop(ndev);

    ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
        kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
    if (ret)
        goto err_out;

    if (tc <= 1) {
        netdev_reset_tc(ndev);
        goto out;
    }

    for (i = 0; i < HNAE3_MAX_TC; i++) {
        struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];

        if (tc_info->enable)
            netdev_set_tc_queue(ndev,
                                tc_info->tc,
                                tc_info->tqp_count,
                                tc_info->tqp_offset);
    }

    for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
        netdev_set_prio_tc_map(ndev, i,
                               kinfo->prio_tc[i]);
    }

out:
    ret = hns3_nic_set_real_num_queue(ndev);

err_out:
    if (if_running)
        (void)hns3_nic_net_open(ndev);

    return ret;
}

static void hns3_recover_hw_addr(struct net_device *ndev)
{
    struct netdev_hw_addr_list *list;
    struct netdev_hw_addr *ha, *tmp;

    /* go through and sync uc_addr entries to the device */
    list = &ndev->uc;
    list_for_each_entry_safe(ha, tmp, &list->list, list)
        hns3_nic_uc_sync(ndev, ha->addr);

    /* go through and sync mc_addr entries to the device */
    list = &ndev->mc;
    list_for_each_entry_safe(ha, tmp, &list->list, list)
        hns3_nic_mc_sync(ndev, ha->addr);
}

static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
    dev_kfree_skb_any(skb);
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
    struct net_device *ndev = h->kinfo.netdev;
    struct hns3_nic_priv *priv = netdev_priv(ndev);
    u32 i;

    for (i = 0; i < h->kinfo.num_tqps; i++) {
        struct netdev_queue *dev_queue;
        struct hns3_enet_ring *ring;

        ring = priv->ring_data[i].ring;
        hns3_clean_tx_ring(ring, ring->desc_num);
        dev_queue = netdev_get_tx_queue(ndev,
                                        priv->ring_data[i].queue_index);
        netdev_tx_reset_queue(dev_queue);

        ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
        hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
    }
}
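/* Reset notification handlers: the hnae3 core notifies the client around
 * a hardware reset with down/up and uninit/init events. "down"/"up" stop
 * and restart the interface, while "uninit"/"init" tear down and rebuild
 * the vector and ring state.
 */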
static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
    struct hnae3_knic_private_info *kinfo = &handle->kinfo;
    struct net_device *ndev = kinfo->netdev;

    if (!netif_running(ndev))
        return 0;

    return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
    struct hnae3_knic_private_info *kinfo = &handle->kinfo;
    struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
    int ret = 0;

    if (netif_running(kinfo->netdev)) {
        ret = hns3_nic_net_up(kinfo->netdev);
        if (ret) {
            netdev_err(kinfo->netdev,
                       "hns net up fail, ret=%d!\n", ret);
            return ret;
        }

        priv->last_reset_time = jiffies;
    }

    return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
    struct net_device *netdev = handle->kinfo.netdev;
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    int ret;

    priv->reset_level = 1;
    hns3_init_mac_addr(netdev);
    hns3_nic_set_rx_mode(netdev);
    hns3_recover_hw_addr(netdev);

    /* Carrier off reporting is important to ethtool even BEFORE open */
    netif_carrier_off(netdev);

    ret = hns3_get_ring_config(priv);
    if (ret)
        return ret;

    ret = hns3_nic_init_vector_data(priv);
    if (ret)
        return ret;

    ret = hns3_init_all_ring(priv);
    if (ret) {
        hns3_nic_uninit_vector_data(priv);
        priv->ring_data = NULL;
    }

    return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
    struct net_device *netdev = handle->kinfo.netdev;
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    int ret;

    hns3_clear_all_ring(handle);

    ret = hns3_nic_uninit_vector_data(priv);
    if (ret) {
        netdev_err(netdev, "uninit vector error\n");
        return ret;
    }

    ret = hns3_uninit_all_ring(priv);
    if (ret)
        netdev_err(netdev, "uninit ring error\n");

    priv->ring_data = NULL;

    return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
                             enum hnae3_reset_notify_type type)
{
    int ret = 0;

    switch (type) {
    case HNAE3_UP_CLIENT:
        ret = hns3_reset_notify_up_enet(handle);
        break;
    case HNAE3_DOWN_CLIENT:
        ret = hns3_reset_notify_down_enet(handle);
        break;
    case HNAE3_INIT_CLIENT:
        ret = hns3_reset_notify_init_enet(handle);
        break;
    case HNAE3_UNINIT_CLIENT:
        ret = hns3_reset_notify_uninit_enet(handle);
        break;
    default:
        break;
    }

    return ret;
}

static u16 hns3_get_max_available_channels(struct net_device *netdev)
{
    struct hnae3_handle *h = hns3_get_handle(netdev);
    u16 free_tqps, max_rss_size, max_tqps;

    h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
    max_tqps = h->kinfo.num_tc * max_rss_size;

    return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
}

static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hnae3_handle *h = hns3_get_handle(netdev);
    int ret;

    ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
    if (ret)
        return ret;

    ret = hns3_get_ring_config(priv);
    if (ret)
        return ret;

    ret = hns3_nic_init_vector_data(priv);
    if (ret)
        goto err_uninit_vector;

    ret = hns3_init_all_ring(priv);
    if (ret)
        goto err_put_ring;

    return 0;

err_put_ring:
    hns3_put_ring_config(priv);
err_uninit_vector:
    hns3_nic_uninit_vector_data(priv);
    return ret;
}

static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
    return (new_tqp_num / num_tc) * num_tc;
}
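/* ethtool -L handler: validates the requested combined channel count
 * against what is currently available, rounds it down to a multiple of
 * the TC count, then tears down the existing rings and vectors and
 * rebuilds them with the new TQP number, reverting to the old count if
 * the rebuild fails.
 */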
int hns3_set_channels(struct net_device *netdev,
                      struct ethtool_channels *ch)
{
    struct hns3_nic_priv *priv = netdev_priv(netdev);
    struct hnae3_handle *h = hns3_get_handle(netdev);
    struct hnae3_knic_private_info *kinfo = &h->kinfo;
    bool if_running = netif_running(netdev);
    u32 new_tqp_num = ch->combined_count;
    u16 org_tqp_num;
    int ret;

    if (ch->rx_count || ch->tx_count)
        return -EOPNOTSUPP;

    if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
        new_tqp_num < kinfo->num_tc) {
        dev_err(&netdev->dev,
                "Change tqps fail, the tqp range is from %d to %d",
                kinfo->num_tc,
                hns3_get_max_available_channels(netdev));
        return -EINVAL;
    }

    new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
    if (kinfo->num_tqps == new_tqp_num)
        return 0;

    if (if_running)
        dev_close(netdev);

    hns3_clear_all_ring(h);

    ret = hns3_nic_uninit_vector_data(priv);
    if (ret) {
        dev_err(&netdev->dev,
                "Unbind vector with tqp fail, nothing is changed");
        goto open_netdev;
    }

    hns3_uninit_all_ring(priv);

    org_tqp_num = h->kinfo.num_tqps;
    ret = hns3_modify_tqp_num(netdev, new_tqp_num);
    if (ret) {
        ret = hns3_modify_tqp_num(netdev, org_tqp_num);
        if (ret) {
            /* If revert to old tqp failed, fatal error occurred */
            dev_err(&netdev->dev,
                    "Revert to old tqp num fail, ret=%d", ret);
            return ret;
        }
        dev_info(&netdev->dev,
                 "Change tqp num fail, Revert to old tqp num");
    }

open_netdev:
    if (if_running)
        dev_open(netdev);

    return ret;
}
static const struct hnae3_client_ops client_ops = {
    .init_instance = hns3_client_init,
    .uninit_instance = hns3_client_uninit,
    .link_status_change = hns3_link_status_change,
    .setup_tc = hns3_client_setup_tc,
    .reset_notify = hns3_reset_notify,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
    int ret;

    pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
    pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

    client.type = HNAE3_CLIENT_KNIC;
    snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
             hns3_driver_name);

    client.ops = &client_ops;

    ret = hnae3_register_client(&client);
    if (ret)
        return ret;

    ret = pci_register_driver(&hns3_driver);
    if (ret)
        hnae3_unregister_client(&client);

    return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
    pci_unregister_driver(&hns3_driver);
    hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");