/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>

#include <net/busy_poll.h>
#include <net/vxlan.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	unsigned int offset = 0;
	int i;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
	rx_ring->cleaned += done;

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
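/* Accelerated RFS (aRFS) support: each steered flow is tracked by a
 * struct mlx4_en_filter, hashed by its IPv4 4-tuple and programmed into
 * the NIC through the flow steering API from a workqueue.
 */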
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return MLX4_NET_TRANS_RULE_NUM;
	}
}
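/* Deferred work that programs one RFS filter into the device: build an
 * ETH + IPv4 + TCP/UDP rule list, detach any rule previously attached for
 * this filter, attach the new one and then expire stale filters.
 */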
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.promisc_mode = MLX4_FS_REGULAR,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
			filter->ip_proto);
		goto ignore;
	}

	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
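/* Fold the ports and IP addresses of a flow into a single value and hash
 * it to select a bucket in priv->filter_hash.
 */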
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
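/* Allocate a new filter in atomic context and link it into both the flat
 * priv->filters list and the 4-tuple hash table.
 */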
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, u8 ip_proto, __be16 src_port,
		     __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}
static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}
static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
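/* ndo_rx_flow_steer() callback: parse the IPv4 TCP/UDP 4-tuple from the skb,
 * reuse an existing filter for the flow (updating its RX queue) or allocate
 * a new one, and defer the device programming to the driver workqueue.
 */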
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
				     src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip, ip_proto,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}
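/* Expire at most MLX4_EN_FILTER_EXPIRY_QUOTA filters per call: drop entries
 * that RFS no longer needs and move the list head past the last entry
 * examined so the next scan continues from there.
 */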
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif
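/* ndo_vlan_rx_add_vid(): mark the VID active and, while holding
 * mdev->state_lock, refresh the port VLAN filter and register the VLAN.
 */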
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
				   __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
				    __be16 proto, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
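/* Add a VXLAN steering rule for @addr on @qpn when the device is in VXLAN
 * tunnel offload mode; silently does nothing otherwise.
 */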
static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
				    int qpn, u64 *reg_id)
{
	int err;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return 0; /* do nothing */

	err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
				    MLX4_DOMAIN_NIC, reg_id);
	if (err) {
		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
		return err;
	}
	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
	return 0;
}
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
				       &priv->tunnel_reg_id);
	if (err)
		goto tunnel_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	if (priv->tunnel_reg_id)
		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);

tunnel_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		mac = mlx4_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		struct hlist_head *bucket;
		unsigned int i;

		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
			bucket = &priv->mac_hash[i];
			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
				mac = mlx4_mac_to_u64(entry->mac);
				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
				       entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);

				mlx4_unregister_mac(dev, priv->port, mac);
				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
			}
		}

		if (priv->tunnel_reg_id) {
			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
			priv->tunnel_reg_id = 0;
		}

		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
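/* Re-point the unicast steering of @qpn from @prev_mac to @new_mac: in
 * hashed steering modes the old rule and MAC registration are released and
 * re-created for the new address; A0 steering uses __mlx4_replace_mac().
 */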
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				if (err)
					return err;
				if (priv->tunnel_reg_id) {
					mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
					priv->tunnel_reg_id = 0;
				}
				err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
							       &priv->tunnel_reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
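/* Replace the hardware MAC under mdev->state_lock; the device is only
 * touched while the port is up, and current_mac is updated on success.
 */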
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
			      unsigned char new_mac[ETH_ALEN + 2])
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  new_mac, priv->current_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	if (!err)
		memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));

	return err;
}
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	unsigned char new_mac[ETH_ALEN + 2];
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	mutex_lock(&mdev->state_lock);
	memcpy(new_mac, saddr->sa_data, ETH_ALEN);
	err = mlx4_en_do_set_mac(priv, new_mac);
	if (!err)
		memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	mutex_unlock(&mdev->state_lock);

	return err;
}
778 static void mlx4_en_clear_list(struct net_device
*dev
)
780 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
781 struct mlx4_en_mc_list
*tmp
, *mc_to_del
;
783 list_for_each_entry_safe(mc_to_del
, tmp
, &priv
->mc_list
, list
) {
784 list_del(&mc_to_del
->list
);
789 static void mlx4_en_cache_mclist(struct net_device
*dev
)
791 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
792 struct netdev_hw_addr
*ha
;
793 struct mlx4_en_mc_list
*tmp
;
795 mlx4_en_clear_list(dev
);
796 netdev_for_each_mc_addr(ha
, dev
) {
797 tmp
= kzalloc(sizeof(struct mlx4_en_mc_list
), GFP_ATOMIC
);
799 mlx4_en_clear_list(dev
);
802 memcpy(tmp
->addr
, ha
->addr
, ETH_ALEN
);
803 list_add_tail(&tmp
->list
, &priv
->mc_list
);
807 static void update_mclist_flags(struct mlx4_en_priv
*priv
,
808 struct list_head
*dst
,
809 struct list_head
*src
)
811 struct mlx4_en_mc_list
*dst_tmp
, *src_tmp
, *new_mc
;
814 /* Find all the entries that should be removed from dst,
815 * These are the entries that are not found in src
817 list_for_each_entry(dst_tmp
, dst
, list
) {
819 list_for_each_entry(src_tmp
, src
, list
) {
820 if (ether_addr_equal(dst_tmp
->addr
, src_tmp
->addr
)) {
826 dst_tmp
->action
= MCLIST_REM
;
829 /* Add entries that exist in src but not in dst
830 * mark them as need to add
832 list_for_each_entry(src_tmp
, src
, list
) {
834 list_for_each_entry(dst_tmp
, dst
, list
) {
835 if (ether_addr_equal(dst_tmp
->addr
, src_tmp
->addr
)) {
836 dst_tmp
->action
= MCLIST_NONE
;
842 new_mc
= kmemdup(src_tmp
,
843 sizeof(struct mlx4_en_mc_list
),
848 new_mc
->action
= MCLIST_ADD
;
849 list_add_tail(&new_mc
->list
, dst
);
854 static void mlx4_en_set_rx_mode(struct net_device
*dev
)
856 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
861 queue_work(priv
->mdev
->workqueue
, &priv
->rx_mode_task
);
864 static void mlx4_en_set_promisc_mode(struct mlx4_en_priv
*priv
,
865 struct mlx4_en_dev
*mdev
)
869 if (!(priv
->flags
& MLX4_EN_FLAG_PROMISC
)) {
870 if (netif_msg_rx_status(priv
))
871 en_warn(priv
, "Entering promiscuous mode\n");
872 priv
->flags
|= MLX4_EN_FLAG_PROMISC
;
874 /* Enable promiscouos mode */
875 switch (mdev
->dev
->caps
.steering_mode
) {
876 case MLX4_STEERING_MODE_DEVICE_MANAGED
:
877 err
= mlx4_flow_steer_promisc_add(mdev
->dev
,
880 MLX4_FS_ALL_DEFAULT
);
882 en_err(priv
, "Failed enabling promiscuous mode\n");
883 priv
->flags
|= MLX4_EN_FLAG_MC_PROMISC
;
886 case MLX4_STEERING_MODE_B0
:
887 err
= mlx4_unicast_promisc_add(mdev
->dev
,
891 en_err(priv
, "Failed enabling unicast promiscuous mode\n");
893 /* Add the default qp number as multicast
896 if (!(priv
->flags
& MLX4_EN_FLAG_MC_PROMISC
)) {
897 err
= mlx4_multicast_promisc_add(mdev
->dev
,
901 en_err(priv
, "Failed enabling multicast promiscuous mode\n");
902 priv
->flags
|= MLX4_EN_FLAG_MC_PROMISC
;
906 case MLX4_STEERING_MODE_A0
:
907 err
= mlx4_SET_PORT_qpn_calc(mdev
->dev
,
912 en_err(priv
, "Failed enabling promiscuous mode\n");
916 /* Disable port multicast filter (unconditionally) */
917 err
= mlx4_SET_MCAST_FLTR(mdev
->dev
, priv
->port
, 0,
918 0, MLX4_MCAST_DISABLE
);
920 en_err(priv
, "Failed disabling multicast filter\n");
924 static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv
*priv
,
925 struct mlx4_en_dev
*mdev
)
929 if (netif_msg_rx_status(priv
))
930 en_warn(priv
, "Leaving promiscuous mode\n");
931 priv
->flags
&= ~MLX4_EN_FLAG_PROMISC
;
933 /* Disable promiscouos mode */
934 switch (mdev
->dev
->caps
.steering_mode
) {
935 case MLX4_STEERING_MODE_DEVICE_MANAGED
:
936 err
= mlx4_flow_steer_promisc_remove(mdev
->dev
,
938 MLX4_FS_ALL_DEFAULT
);
940 en_err(priv
, "Failed disabling promiscuous mode\n");
941 priv
->flags
&= ~MLX4_EN_FLAG_MC_PROMISC
;
944 case MLX4_STEERING_MODE_B0
:
945 err
= mlx4_unicast_promisc_remove(mdev
->dev
,
949 en_err(priv
, "Failed disabling unicast promiscuous mode\n");
950 /* Disable Multicast promisc */
951 if (priv
->flags
& MLX4_EN_FLAG_MC_PROMISC
) {
952 err
= mlx4_multicast_promisc_remove(mdev
->dev
,
956 en_err(priv
, "Failed disabling multicast promiscuous mode\n");
957 priv
->flags
&= ~MLX4_EN_FLAG_MC_PROMISC
;
961 case MLX4_STEERING_MODE_A0
:
962 err
= mlx4_SET_PORT_qpn_calc(mdev
->dev
,
966 en_err(priv
, "Failed disabling promiscuous mode\n");
971 static void mlx4_en_do_multicast(struct mlx4_en_priv
*priv
,
972 struct net_device
*dev
,
973 struct mlx4_en_dev
*mdev
)
975 struct mlx4_en_mc_list
*mclist
, *tmp
;
977 u8 mc_list
[16] = {0};
980 /* Enable/disable the multicast filter according to IFF_ALLMULTI */
981 if (dev
->flags
& IFF_ALLMULTI
) {
982 err
= mlx4_SET_MCAST_FLTR(mdev
->dev
, priv
->port
, 0,
983 0, MLX4_MCAST_DISABLE
);
985 en_err(priv
, "Failed disabling multicast filter\n");
987 /* Add the default qp number as multicast promisc */
988 if (!(priv
->flags
& MLX4_EN_FLAG_MC_PROMISC
)) {
989 switch (mdev
->dev
->caps
.steering_mode
) {
990 case MLX4_STEERING_MODE_DEVICE_MANAGED
:
991 err
= mlx4_flow_steer_promisc_add(mdev
->dev
,
997 case MLX4_STEERING_MODE_B0
:
998 err
= mlx4_multicast_promisc_add(mdev
->dev
,
1003 case MLX4_STEERING_MODE_A0
:
1007 en_err(priv
, "Failed entering multicast promisc mode\n");
1008 priv
->flags
|= MLX4_EN_FLAG_MC_PROMISC
;
1011 /* Disable Multicast promisc */
1012 if (priv
->flags
& MLX4_EN_FLAG_MC_PROMISC
) {
1013 switch (mdev
->dev
->caps
.steering_mode
) {
1014 case MLX4_STEERING_MODE_DEVICE_MANAGED
:
1015 err
= mlx4_flow_steer_promisc_remove(mdev
->dev
,
1017 MLX4_FS_MC_DEFAULT
);
1020 case MLX4_STEERING_MODE_B0
:
1021 err
= mlx4_multicast_promisc_remove(mdev
->dev
,
1026 case MLX4_STEERING_MODE_A0
:
1030 en_err(priv
, "Failed disabling multicast promiscuous mode\n");
1031 priv
->flags
&= ~MLX4_EN_FLAG_MC_PROMISC
;
1034 err
= mlx4_SET_MCAST_FLTR(mdev
->dev
, priv
->port
, 0,
1035 0, MLX4_MCAST_DISABLE
);
1037 en_err(priv
, "Failed disabling multicast filter\n");
1039 /* Flush mcast filter and init it with broadcast address */
1040 mlx4_SET_MCAST_FLTR(mdev
->dev
, priv
->port
, ETH_BCAST
,
1041 1, MLX4_MCAST_CONFIG
);
1043 /* Update multicast list - we cache all addresses so they won't
1044 * change while HW is updated holding the command semaphor */
1045 netif_addr_lock_bh(dev
);
1046 mlx4_en_cache_mclist(dev
);
1047 netif_addr_unlock_bh(dev
);
1048 list_for_each_entry(mclist
, &priv
->mc_list
, list
) {
1049 mcast_addr
= mlx4_mac_to_u64(mclist
->addr
);
1050 mlx4_SET_MCAST_FLTR(mdev
->dev
, priv
->port
,
1051 mcast_addr
, 0, MLX4_MCAST_CONFIG
);
1053 err
= mlx4_SET_MCAST_FLTR(mdev
->dev
, priv
->port
, 0,
1054 0, MLX4_MCAST_ENABLE
);
1056 en_err(priv
, "Failed enabling multicast filter\n");
1058 update_mclist_flags(priv
, &priv
->curr_list
, &priv
->mc_list
);
1059 list_for_each_entry_safe(mclist
, tmp
, &priv
->curr_list
, list
) {
1060 if (mclist
->action
== MCLIST_REM
) {
1061 /* detach this address and delete from list */
1062 memcpy(&mc_list
[10], mclist
->addr
, ETH_ALEN
);
1063 mc_list
[5] = priv
->port
;
1064 err
= mlx4_multicast_detach(mdev
->dev
,
1065 &priv
->rss_map
.indir_qp
,
1070 en_err(priv
, "Fail to detach multicast address\n");
1072 if (mclist
->tunnel_reg_id
) {
1073 err
= mlx4_flow_detach(priv
->mdev
->dev
, mclist
->tunnel_reg_id
);
1075 en_err(priv
, "Failed to detach multicast address\n");
1078 /* remove from list */
1079 list_del(&mclist
->list
);
1081 } else if (mclist
->action
== MCLIST_ADD
) {
1082 /* attach the address */
1083 memcpy(&mc_list
[10], mclist
->addr
, ETH_ALEN
);
1084 /* needed for B0 steering support */
1085 mc_list
[5] = priv
->port
;
1086 err
= mlx4_multicast_attach(mdev
->dev
,
1087 &priv
->rss_map
.indir_qp
,
1093 en_err(priv
, "Fail to attach multicast address\n");
1095 err
= mlx4_en_tunnel_steer_add(priv
, &mc_list
[10], priv
->base_qpn
,
1096 &mclist
->tunnel_reg_id
);
1098 en_err(priv
, "Failed to attach multicast address\n");
1104 static void mlx4_en_do_uc_filter(struct mlx4_en_priv
*priv
,
1105 struct net_device
*dev
,
1106 struct mlx4_en_dev
*mdev
)
1108 struct netdev_hw_addr
*ha
;
1109 struct mlx4_mac_entry
*entry
;
1110 struct hlist_node
*tmp
;
1114 struct hlist_head
*bucket
;
1119 /* Note that we do not need to protect our mac_hash traversal with rcu,
1120 * since all modification code is protected by mdev->state_lock
1123 /* find what to remove */
1124 for (i
= 0; i
< MLX4_EN_MAC_HASH_SIZE
; ++i
) {
1125 bucket
= &priv
->mac_hash
[i
];
1126 hlist_for_each_entry_safe(entry
, tmp
, bucket
, hlist
) {
1128 netdev_for_each_uc_addr(ha
, dev
) {
1129 if (ether_addr_equal_64bits(entry
->mac
,
1136 /* MAC address of the port is not in uc list */
1137 if (ether_addr_equal_64bits(entry
->mac
,
1142 mac
= mlx4_mac_to_u64(entry
->mac
);
1143 mlx4_en_uc_steer_release(priv
, entry
->mac
,
1146 mlx4_unregister_mac(mdev
->dev
, priv
->port
, mac
);
1148 hlist_del_rcu(&entry
->hlist
);
1149 kfree_rcu(entry
, rcu
);
1150 en_dbg(DRV
, priv
, "Removed MAC %pM on port:%d\n",
1151 entry
->mac
, priv
->port
);
1157 /* if we didn't remove anything, there is no use in trying to add
1158 * again once we are in a forced promisc mode state
1160 if ((priv
->flags
& MLX4_EN_FLAG_FORCE_PROMISC
) && 0 == removed
)
1163 prev_flags
= priv
->flags
;
1164 priv
->flags
&= ~MLX4_EN_FLAG_FORCE_PROMISC
;
1166 /* find what to add */
1167 netdev_for_each_uc_addr(ha
, dev
) {
1169 bucket
= &priv
->mac_hash
[ha
->addr
[MLX4_EN_MAC_HASH_IDX
]];
1170 hlist_for_each_entry(entry
, bucket
, hlist
) {
1171 if (ether_addr_equal_64bits(entry
->mac
, ha
->addr
)) {
1178 entry
= kmalloc(sizeof(*entry
), GFP_KERNEL
);
1180 en_err(priv
, "Failed adding MAC %pM on port:%d (out of memory)\n",
1181 ha
->addr
, priv
->port
);
1182 priv
->flags
|= MLX4_EN_FLAG_FORCE_PROMISC
;
1185 mac
= mlx4_mac_to_u64(ha
->addr
);
1186 memcpy(entry
->mac
, ha
->addr
, ETH_ALEN
);
1187 err
= mlx4_register_mac(mdev
->dev
, priv
->port
, mac
);
1189 en_err(priv
, "Failed registering MAC %pM on port %d: %d\n",
1190 ha
->addr
, priv
->port
, err
);
1192 priv
->flags
|= MLX4_EN_FLAG_FORCE_PROMISC
;
1195 err
= mlx4_en_uc_steer_add(priv
, ha
->addr
,
1199 en_err(priv
, "Failed adding MAC %pM on port %d: %d\n",
1200 ha
->addr
, priv
->port
, err
);
1201 mlx4_unregister_mac(mdev
->dev
, priv
->port
, mac
);
1203 priv
->flags
|= MLX4_EN_FLAG_FORCE_PROMISC
;
1206 unsigned int mac_hash
;
1207 en_dbg(DRV
, priv
, "Added MAC %pM on port:%d\n",
1208 ha
->addr
, priv
->port
);
1209 mac_hash
= ha
->addr
[MLX4_EN_MAC_HASH_IDX
];
1210 bucket
= &priv
->mac_hash
[mac_hash
];
1211 hlist_add_head_rcu(&entry
->hlist
, bucket
);
1216 if (priv
->flags
& MLX4_EN_FLAG_FORCE_PROMISC
) {
1217 en_warn(priv
, "Forcing promiscuous mode on port:%d\n",
1219 } else if (prev_flags
& MLX4_EN_FLAG_FORCE_PROMISC
) {
1220 en_warn(priv
, "Stop forcing promiscuous mode on port:%d\n",
1225 static void mlx4_en_do_set_rx_mode(struct work_struct
*work
)
1227 struct mlx4_en_priv
*priv
= container_of(work
, struct mlx4_en_priv
,
1229 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1230 struct net_device
*dev
= priv
->dev
;
1232 mutex_lock(&mdev
->state_lock
);
1233 if (!mdev
->device_up
) {
1234 en_dbg(HW
, priv
, "Card is not up, ignoring rx mode change.\n");
1237 if (!priv
->port_up
) {
1238 en_dbg(HW
, priv
, "Port is down, ignoring rx mode change.\n");
1242 if (!netif_carrier_ok(dev
)) {
1243 if (!mlx4_en_QUERY_PORT(mdev
, priv
->port
)) {
1244 if (priv
->port_state
.link_state
) {
1245 priv
->last_link_state
= MLX4_DEV_EVENT_PORT_UP
;
1246 netif_carrier_on(dev
);
1247 en_dbg(LINK
, priv
, "Link Up\n");
1252 if (dev
->priv_flags
& IFF_UNICAST_FLT
)
1253 mlx4_en_do_uc_filter(priv
, dev
, mdev
);
1255 /* Promsicuous mode: disable all filters */
1256 if ((dev
->flags
& IFF_PROMISC
) ||
1257 (priv
->flags
& MLX4_EN_FLAG_FORCE_PROMISC
)) {
1258 mlx4_en_set_promisc_mode(priv
, mdev
);
1262 /* Not in promiscuous mode */
1263 if (priv
->flags
& MLX4_EN_FLAG_PROMISC
)
1264 mlx4_en_clear_promisc_mode(priv
, mdev
);
1266 mlx4_en_do_multicast(priv
, dev
, mdev
);
1268 mutex_unlock(&mdev
->state_lock
);
1271 #ifdef CONFIG_NET_POLL_CONTROLLER
1272 static void mlx4_en_netpoll(struct net_device
*dev
)
1274 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1275 struct mlx4_en_cq
*cq
;
1278 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
1279 cq
= priv
->rx_cq
[i
];
1280 napi_schedule(&cq
->napi
);
1285 static void mlx4_en_tx_timeout(struct net_device
*dev
)
1287 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1288 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1291 if (netif_msg_timer(priv
))
1292 en_warn(priv
, "Tx timeout called on port:%d\n", priv
->port
);
1294 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
1295 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev
, i
)))
1297 en_warn(priv
, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
1298 i
, priv
->tx_ring
[i
]->qpn
, priv
->tx_ring
[i
]->cqn
,
1299 priv
->tx_ring
[i
]->cons
, priv
->tx_ring
[i
]->prod
);
1302 priv
->port_stats
.tx_timeout
++;
1303 en_dbg(DRV
, priv
, "Scheduling watchdog\n");
1304 queue_work(mdev
->workqueue
, &priv
->watchdog_task
);
1308 static struct net_device_stats
*mlx4_en_get_stats(struct net_device
*dev
)
1310 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1312 spin_lock_bh(&priv
->stats_lock
);
1313 memcpy(&priv
->ret_stats
, &priv
->stats
, sizeof(priv
->stats
));
1314 spin_unlock_bh(&priv
->stats_lock
);
1316 return &priv
->ret_stats
;
1319 static void mlx4_en_set_default_moderation(struct mlx4_en_priv
*priv
)
1321 struct mlx4_en_cq
*cq
;
1324 /* If we haven't received a specific coalescing setting
1325 * (module param), we set the moderation parameters as follows:
1326 * - moder_cnt is set to the number of mtu sized packets to
1327 * satisfy our coalescing target.
1328 * - moder_time is set to a fixed value.
1330 priv
->rx_frames
= MLX4_EN_RX_COAL_TARGET
;
1331 priv
->rx_usecs
= MLX4_EN_RX_COAL_TIME
;
1332 priv
->tx_frames
= MLX4_EN_TX_COAL_PKTS
;
1333 priv
->tx_usecs
= MLX4_EN_TX_COAL_TIME
;
1334 en_dbg(INTR
, priv
, "Default coalesing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
1335 priv
->dev
->mtu
, priv
->rx_frames
, priv
->rx_usecs
);
1337 /* Setup cq moderation params */
1338 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
1339 cq
= priv
->rx_cq
[i
];
1340 cq
->moder_cnt
= priv
->rx_frames
;
1341 cq
->moder_time
= priv
->rx_usecs
;
1342 priv
->last_moder_time
[i
] = MLX4_EN_AUTO_CONF
;
1343 priv
->last_moder_packets
[i
] = 0;
1344 priv
->last_moder_bytes
[i
] = 0;
1347 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
1348 cq
= priv
->tx_cq
[i
];
1349 cq
->moder_cnt
= priv
->tx_frames
;
1350 cq
->moder_time
= priv
->tx_usecs
;
1353 /* Reset auto-moderation params */
1354 priv
->pkt_rate_low
= MLX4_EN_RX_RATE_LOW
;
1355 priv
->rx_usecs_low
= MLX4_EN_RX_COAL_TIME_LOW
;
1356 priv
->pkt_rate_high
= MLX4_EN_RX_RATE_HIGH
;
1357 priv
->rx_usecs_high
= MLX4_EN_RX_COAL_TIME_HIGH
;
1358 priv
->sample_interval
= MLX4_EN_SAMPLE_INTERVAL
;
1359 priv
->adaptive_rx_coal
= 1;
1360 priv
->last_moder_jiffies
= 0;
1361 priv
->last_moder_tx_packets
= 0;
1364 static void mlx4_en_auto_moderation(struct mlx4_en_priv
*priv
)
1366 unsigned long period
= (unsigned long) (jiffies
- priv
->last_moder_jiffies
);
1367 struct mlx4_en_cq
*cq
;
1368 unsigned long packets
;
1370 unsigned long avg_pkt_size
;
1371 unsigned long rx_packets
;
1372 unsigned long rx_bytes
;
1373 unsigned long rx_pkt_diff
;
1377 if (!priv
->adaptive_rx_coal
|| period
< priv
->sample_interval
* HZ
)
1380 for (ring
= 0; ring
< priv
->rx_ring_num
; ring
++) {
1381 spin_lock_bh(&priv
->stats_lock
);
1382 rx_packets
= priv
->rx_ring
[ring
]->packets
;
1383 rx_bytes
= priv
->rx_ring
[ring
]->bytes
;
1384 spin_unlock_bh(&priv
->stats_lock
);
1386 rx_pkt_diff
= ((unsigned long) (rx_packets
-
1387 priv
->last_moder_packets
[ring
]));
1388 packets
= rx_pkt_diff
;
1389 rate
= packets
* HZ
/ period
;
1390 avg_pkt_size
= packets
? ((unsigned long) (rx_bytes
-
1391 priv
->last_moder_bytes
[ring
])) / packets
: 0;
1393 /* Apply auto-moderation only when packet rate
1394 * exceeds a rate that it matters */
1395 if (rate
> (MLX4_EN_RX_RATE_THRESH
/ priv
->rx_ring_num
) &&
1396 avg_pkt_size
> MLX4_EN_AVG_PKT_SMALL
) {
1397 if (rate
< priv
->pkt_rate_low
)
1398 moder_time
= priv
->rx_usecs_low
;
1399 else if (rate
> priv
->pkt_rate_high
)
1400 moder_time
= priv
->rx_usecs_high
;
1402 moder_time
= (rate
- priv
->pkt_rate_low
) *
1403 (priv
->rx_usecs_high
- priv
->rx_usecs_low
) /
1404 (priv
->pkt_rate_high
- priv
->pkt_rate_low
) +
1407 moder_time
= priv
->rx_usecs_low
;
1410 if (moder_time
!= priv
->last_moder_time
[ring
]) {
1411 priv
->last_moder_time
[ring
] = moder_time
;
1412 cq
= priv
->rx_cq
[ring
];
1413 cq
->moder_time
= moder_time
;
1414 cq
->moder_cnt
= priv
->rx_frames
;
1415 err
= mlx4_en_set_cq_moder(priv
, cq
);
1417 en_err(priv
, "Failed modifying moderation for cq:%d\n",
1420 priv
->last_moder_packets
[ring
] = rx_packets
;
1421 priv
->last_moder_bytes
[ring
] = rx_bytes
;
1424 priv
->last_moder_jiffies
= jiffies
;
1427 static void mlx4_en_do_get_stats(struct work_struct
*work
)
1429 struct delayed_work
*delay
= to_delayed_work(work
);
1430 struct mlx4_en_priv
*priv
= container_of(delay
, struct mlx4_en_priv
,
1432 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1435 mutex_lock(&mdev
->state_lock
);
1436 if (mdev
->device_up
) {
1437 if (priv
->port_up
) {
1438 err
= mlx4_en_DUMP_ETH_STATS(mdev
, priv
->port
, 0);
1440 en_dbg(HW
, priv
, "Could not update stats\n");
1442 mlx4_en_auto_moderation(priv
);
1445 queue_delayed_work(mdev
->workqueue
, &priv
->stats_task
, STATS_DELAY
);
1447 if (mdev
->mac_removed
[MLX4_MAX_PORTS
+ 1 - priv
->port
]) {
1448 mlx4_en_do_set_mac(priv
, priv
->current_mac
);
1449 mdev
->mac_removed
[MLX4_MAX_PORTS
+ 1 - priv
->port
] = 0;
1451 mutex_unlock(&mdev
->state_lock
);
1454 /* mlx4_en_service_task - Run service task for tasks that needed to be done
1457 static void mlx4_en_service_task(struct work_struct
*work
)
1459 struct delayed_work
*delay
= to_delayed_work(work
);
1460 struct mlx4_en_priv
*priv
= container_of(delay
, struct mlx4_en_priv
,
1462 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1464 mutex_lock(&mdev
->state_lock
);
1465 if (mdev
->device_up
) {
1466 if (mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_TS
)
1467 mlx4_en_ptp_overflow_check(mdev
);
1469 mlx4_en_recover_from_oom(priv
);
1470 queue_delayed_work(mdev
->workqueue
, &priv
->service_task
,
1471 SERVICE_TASK_DELAY
);
1473 mutex_unlock(&mdev
->state_lock
);
1476 static void mlx4_en_linkstate(struct work_struct
*work
)
1478 struct mlx4_en_priv
*priv
= container_of(work
, struct mlx4_en_priv
,
1480 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1481 int linkstate
= priv
->link_state
;
1483 mutex_lock(&mdev
->state_lock
);
1484 /* If observable port state changed set carrier state and
1485 * report to system log */
1486 if (priv
->last_link_state
!= linkstate
) {
1487 if (linkstate
== MLX4_DEV_EVENT_PORT_DOWN
) {
1488 en_info(priv
, "Link Down\n");
1489 netif_carrier_off(priv
->dev
);
1491 en_info(priv
, "Link Up\n");
1492 netif_carrier_on(priv
->dev
);
1495 priv
->last_link_state
= linkstate
;
1496 mutex_unlock(&mdev
->state_lock
);
1499 static int mlx4_en_init_affinity_hint(struct mlx4_en_priv
*priv
, int ring_idx
)
1501 struct mlx4_en_rx_ring
*ring
= priv
->rx_ring
[ring_idx
];
1502 int numa_node
= priv
->mdev
->dev
->numa_node
;
1504 if (!zalloc_cpumask_var(&ring
->affinity_mask
, GFP_KERNEL
))
1507 cpumask_set_cpu(cpumask_local_spread(ring_idx
, numa_node
),
1508 ring
->affinity_mask
);
1512 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv
*priv
, int ring_idx
)
1514 free_cpumask_var(priv
->rx_ring
[ring_idx
]->affinity_mask
);
1517 int mlx4_en_start_port(struct net_device
*dev
)
1519 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1520 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1521 struct mlx4_en_cq
*cq
;
1522 struct mlx4_en_tx_ring
*tx_ring
;
1528 u8 mc_list
[16] = {0};
1530 if (priv
->port_up
) {
1531 en_dbg(DRV
, priv
, "start port called while port already up\n");
1535 INIT_LIST_HEAD(&priv
->mc_list
);
1536 INIT_LIST_HEAD(&priv
->curr_list
);
1537 INIT_LIST_HEAD(&priv
->ethtool_list
);
1538 memset(&priv
->ethtool_rules
[0], 0,
1539 sizeof(struct ethtool_flow_id
) * MAX_NUM_OF_FS_RULES
);
1541 /* Calculate Rx buf size */
1542 dev
->mtu
= min(dev
->mtu
, priv
->max_mtu
);
1543 mlx4_en_calc_rx_buf(dev
);
1544 en_dbg(DRV
, priv
, "Rx buf size:%d\n", priv
->rx_skb_size
);
1546 /* Configure rx cq's and rings */
1547 err
= mlx4_en_activate_rx_rings(priv
);
1549 en_err(priv
, "Failed to activate RX rings\n");
1552 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
1553 cq
= priv
->rx_cq
[i
];
1555 mlx4_en_cq_init_lock(cq
);
1557 err
= mlx4_en_init_affinity_hint(priv
, i
);
1559 en_err(priv
, "Failed preparing IRQ affinity hint\n");
1563 err
= mlx4_en_activate_cq(priv
, cq
, i
);
1565 en_err(priv
, "Failed activating Rx CQ\n");
1566 mlx4_en_free_affinity_hint(priv
, i
);
1570 for (j
= 0; j
< cq
->size
; j
++) {
1571 struct mlx4_cqe
*cqe
= NULL
;
1573 cqe
= mlx4_en_get_cqe(cq
->buf
, j
, priv
->cqe_size
) +
1575 cqe
->owner_sr_opcode
= MLX4_CQE_OWNER_MASK
;
1578 err
= mlx4_en_set_cq_moder(priv
, cq
);
1580 en_err(priv
, "Failed setting cq moderation parameters\n");
1581 mlx4_en_deactivate_cq(priv
, cq
);
1582 mlx4_en_free_affinity_hint(priv
, i
);
1585 mlx4_en_arm_cq(priv
, cq
);
1586 priv
->rx_ring
[i
]->cqn
= cq
->mcq
.cqn
;
1591 en_dbg(DRV
, priv
, "Getting qp number for port %d\n", priv
->port
);
1592 err
= mlx4_en_get_qp(priv
);
1594 en_err(priv
, "Failed getting eth qp\n");
1597 mdev
->mac_removed
[priv
->port
] = 0;
1599 err
= mlx4_en_config_rss_steer(priv
);
1601 en_err(priv
, "Failed configuring rss steering\n");
1605 err
= mlx4_en_create_drop_qp(priv
);
1609 /* Configure tx cq's and rings */
1610 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
1612 cq
= priv
->tx_cq
[i
];
1613 err
= mlx4_en_activate_cq(priv
, cq
, i
);
1615 en_err(priv
, "Failed allocating Tx CQ\n");
1618 err
= mlx4_en_set_cq_moder(priv
, cq
);
1620 en_err(priv
, "Failed setting cq moderation parameters\n");
1621 mlx4_en_deactivate_cq(priv
, cq
);
1624 en_dbg(DRV
, priv
, "Resetting index of collapsed CQ:%d to -1\n", i
);
1625 cq
->buf
->wqe_index
= cpu_to_be16(0xffff);
1627 /* Configure ring */
1628 tx_ring
= priv
->tx_ring
[i
];
1629 err
= mlx4_en_activate_tx_ring(priv
, tx_ring
, cq
->mcq
.cqn
,
1630 i
/ priv
->num_tx_rings_p_up
);
1632 en_err(priv
, "Failed allocating Tx ring\n");
1633 mlx4_en_deactivate_cq(priv
, cq
);
1636 tx_ring
->tx_queue
= netdev_get_tx_queue(dev
, i
);
1638 /* Arm CQ for TX completions */
1639 mlx4_en_arm_cq(priv
, cq
);
1641 /* Set initial ownership of all Tx TXBBs to SW (1) */
1642 for (j
= 0; j
< tx_ring
->buf_size
; j
+= STAMP_STRIDE
)
1643 *((u32
*) (tx_ring
->buf
+ j
)) = 0xffffffff;
1647 /* Configure port */
1648 err
= mlx4_SET_PORT_general(mdev
->dev
, priv
->port
,
1649 priv
->rx_skb_size
+ ETH_FCS_LEN
,
1650 priv
->prof
->tx_pause
,
1652 priv
->prof
->rx_pause
,
1653 priv
->prof
->rx_ppp
);
1655 en_err(priv
, "Failed setting port general configurations for port %d, with error %d\n",
1659 /* Set default qp number */
1660 err
= mlx4_SET_PORT_qpn_calc(mdev
->dev
, priv
->port
, priv
->base_qpn
, 0);
1662 en_err(priv
, "Failed setting default qp numbers\n");
1666 if (mdev
->dev
->caps
.tunnel_offload_mode
== MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
) {
1667 err
= mlx4_SET_PORT_VXLAN(mdev
->dev
, priv
->port
, VXLAN_STEER_BY_OUTER_MAC
, 1);
1669 en_err(priv
, "Failed setting port L2 tunnel configuration, err %d\n",
1676 en_dbg(HW
, priv
, "Initializing port\n");
1677 err
= mlx4_INIT_PORT(mdev
->dev
, priv
->port
);
1679 en_err(priv
, "Failed Initializing port\n");
1683 /* Attach rx QP to bradcast address */
1684 memset(&mc_list
[10], 0xff, ETH_ALEN
);
1685 mc_list
[5] = priv
->port
; /* needed for B0 steering support */
1686 if (mlx4_multicast_attach(mdev
->dev
, &priv
->rss_map
.indir_qp
, mc_list
,
1687 priv
->port
, 0, MLX4_PROT_ETH
,
1688 &priv
->broadcast_id
))
1689 mlx4_warn(mdev
, "Failed Attaching Broadcast\n");
1691 /* Must redo promiscuous mode setup. */
1692 priv
->flags
&= ~(MLX4_EN_FLAG_PROMISC
| MLX4_EN_FLAG_MC_PROMISC
);
1694 /* Schedule multicast task to populate multicast list */
1695 queue_work(mdev
->workqueue
, &priv
->rx_mode_task
);
1697 mlx4_set_stats_bitmap(mdev
->dev
, &priv
->stats_bitmap
);
1699 #ifdef CONFIG_MLX4_EN_VXLAN
1700 if (priv
->mdev
->dev
->caps
.tunnel_offload_mode
== MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
)
1701 vxlan_get_rx_port(dev
);
1703 priv
->port_up
= true;
1704 netif_tx_start_all_queues(dev
);
1705 netif_device_attach(dev
);
1710 while (tx_index
--) {
1711 mlx4_en_deactivate_tx_ring(priv
, priv
->tx_ring
[tx_index
]);
1712 mlx4_en_deactivate_cq(priv
, priv
->tx_cq
[tx_index
]);
1714 mlx4_en_destroy_drop_qp(priv
);
1716 mlx4_en_release_rss_steer(priv
);
1718 mlx4_en_put_qp(priv
);
1720 while (rx_index
--) {
1721 mlx4_en_deactivate_cq(priv
, priv
->rx_cq
[rx_index
]);
1722 mlx4_en_free_affinity_hint(priv
, i
);
1724 for (i
= 0; i
< priv
->rx_ring_num
; i
++)
1725 mlx4_en_deactivate_rx_ring(priv
, priv
->rx_ring
[i
]);
1727 return err
; /* need to close devices */
1731 void mlx4_en_stop_port(struct net_device
*dev
, int detach
)
1733 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1734 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1735 struct mlx4_en_mc_list
*mclist
, *tmp
;
1736 struct ethtool_flow_id
*flow
, *tmp_flow
;
1738 u8 mc_list
[16] = {0};
1740 if (!priv
->port_up
) {
1741 en_dbg(DRV
, priv
, "stop port called while port already down\n");
1746 mlx4_CLOSE_PORT(mdev
->dev
, priv
->port
);
1748 /* Synchronize with tx routine */
1749 netif_tx_lock_bh(dev
);
1751 netif_device_detach(dev
);
1752 netif_tx_stop_all_queues(dev
);
1753 netif_tx_unlock_bh(dev
);
1755 netif_tx_disable(dev
);
1757 /* Set port as not active */
1758 priv
->port_up
= false;
1760 /* Promsicuous mode */
1761 if (mdev
->dev
->caps
.steering_mode
==
1762 MLX4_STEERING_MODE_DEVICE_MANAGED
) {
1763 priv
->flags
&= ~(MLX4_EN_FLAG_PROMISC
|
1764 MLX4_EN_FLAG_MC_PROMISC
);
1765 mlx4_flow_steer_promisc_remove(mdev
->dev
,
1767 MLX4_FS_ALL_DEFAULT
);
1768 mlx4_flow_steer_promisc_remove(mdev
->dev
,
1770 MLX4_FS_MC_DEFAULT
);
1771 } else if (priv
->flags
& MLX4_EN_FLAG_PROMISC
) {
1772 priv
->flags
&= ~MLX4_EN_FLAG_PROMISC
;
1774 /* Disable promiscouos mode */
1775 mlx4_unicast_promisc_remove(mdev
->dev
, priv
->base_qpn
,
1778 /* Disable Multicast promisc */
1779 if (priv
->flags
& MLX4_EN_FLAG_MC_PROMISC
) {
1780 mlx4_multicast_promisc_remove(mdev
->dev
, priv
->base_qpn
,
1782 priv
->flags
&= ~MLX4_EN_FLAG_MC_PROMISC
;
1786 /* Detach All multicasts */
1787 memset(&mc_list
[10], 0xff, ETH_ALEN
);
1788 mc_list
[5] = priv
->port
; /* needed for B0 steering support */
1789 mlx4_multicast_detach(mdev
->dev
, &priv
->rss_map
.indir_qp
, mc_list
,
1790 MLX4_PROT_ETH
, priv
->broadcast_id
);
1791 list_for_each_entry(mclist
, &priv
->curr_list
, list
) {
1792 memcpy(&mc_list
[10], mclist
->addr
, ETH_ALEN
);
1793 mc_list
[5] = priv
->port
;
1794 mlx4_multicast_detach(mdev
->dev
, &priv
->rss_map
.indir_qp
,
1795 mc_list
, MLX4_PROT_ETH
, mclist
->reg_id
);
1796 if (mclist
->tunnel_reg_id
)
1797 mlx4_flow_detach(mdev
->dev
, mclist
->tunnel_reg_id
);
1799 mlx4_en_clear_list(dev
);
1800 list_for_each_entry_safe(mclist
, tmp
, &priv
->curr_list
, list
) {
1801 list_del(&mclist
->list
);
1805 /* Flush multicast filter */
1806 mlx4_SET_MCAST_FLTR(mdev
->dev
, priv
->port
, 0, 1, MLX4_MCAST_CONFIG
);
1808 /* Remove flow steering rules for the port*/
1809 if (mdev
->dev
->caps
.steering_mode
==
1810 MLX4_STEERING_MODE_DEVICE_MANAGED
) {
1812 list_for_each_entry_safe(flow
, tmp_flow
,
1813 &priv
->ethtool_list
, list
) {
1814 mlx4_flow_detach(mdev
->dev
, flow
->id
);
1815 list_del(&flow
->list
);
1819 mlx4_en_destroy_drop_qp(priv
);
1822 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
1823 mlx4_en_deactivate_tx_ring(priv
, priv
->tx_ring
[i
]);
1824 mlx4_en_deactivate_cq(priv
, priv
->tx_cq
[i
]);
1828 for (i
= 0; i
< priv
->tx_ring_num
; i
++)
1829 mlx4_en_free_tx_buf(dev
, priv
->tx_ring
[i
]);
1832 mlx4_en_release_rss_steer(priv
);
1834 /* Unregister Mac address for the port */
1835 mlx4_en_put_qp(priv
);
1836 if (!(mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN
))
1837 mdev
->mac_removed
[priv
->port
] = 1;
1840 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
1841 struct mlx4_en_cq
*cq
= priv
->rx_cq
[i
];
1844 while (!mlx4_en_cq_lock_napi(cq
)) {
1845 pr_info("CQ %d locked\n", i
);
1850 while (test_bit(NAPI_STATE_SCHED
, &cq
->napi
.state
))
1852 mlx4_en_deactivate_rx_ring(priv
, priv
->rx_ring
[i
]);
1853 mlx4_en_deactivate_cq(priv
, cq
);
1855 mlx4_en_free_affinity_hint(priv
, i
);
1859 static void mlx4_en_restart(struct work_struct
*work
)
1861 struct mlx4_en_priv
*priv
= container_of(work
, struct mlx4_en_priv
,
1863 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1864 struct net_device
*dev
= priv
->dev
;
1866 en_dbg(DRV
, priv
, "Watchdog task called for port %d\n", priv
->port
);
1868 mutex_lock(&mdev
->state_lock
);
1869 if (priv
->port_up
) {
1870 mlx4_en_stop_port(dev
, 1);
1871 if (mlx4_en_start_port(dev
))
1872 en_err(priv
, "Failed restarting port %d\n", priv
->port
);
1874 mutex_unlock(&mdev
->state_lock
);
1877 static void mlx4_en_clear_stats(struct net_device
*dev
)
1879 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1880 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1883 if (mlx4_en_DUMP_ETH_STATS(mdev
, priv
->port
, 1))
1884 en_dbg(HW
, priv
, "Failed dumping statistics\n");
1886 memset(&priv
->stats
, 0, sizeof(priv
->stats
));
1887 memset(&priv
->pstats
, 0, sizeof(priv
->pstats
));
1888 memset(&priv
->pkstats
, 0, sizeof(priv
->pkstats
));
1889 memset(&priv
->port_stats
, 0, sizeof(priv
->port_stats
));
1891 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
1892 priv
->tx_ring
[i
]->bytes
= 0;
1893 priv
->tx_ring
[i
]->packets
= 0;
1894 priv
->tx_ring
[i
]->tx_csum
= 0;
1896 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
1897 priv
->rx_ring
[i
]->bytes
= 0;
1898 priv
->rx_ring
[i
]->packets
= 0;
1899 priv
->rx_ring
[i
]->csum_ok
= 0;
1900 priv
->rx_ring
[i
]->csum_none
= 0;
1904 static int mlx4_en_open(struct net_device
*dev
)
1906 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1907 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1910 mutex_lock(&mdev
->state_lock
);
1912 if (!mdev
->device_up
) {
1913 en_err(priv
, "Cannot open - device down/disabled\n");
1918 /* Reset HW statistics and SW counters */
1919 mlx4_en_clear_stats(dev
);
1921 err
= mlx4_en_start_port(dev
);
1923 en_err(priv
, "Failed starting port:%d\n", priv
->port
);
1926 mutex_unlock(&mdev
->state_lock
);
1931 static int mlx4_en_close(struct net_device
*dev
)
1933 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
1934 struct mlx4_en_dev
*mdev
= priv
->mdev
;
1936 en_dbg(IFDOWN
, priv
, "Close port called\n");
1938 mutex_lock(&mdev
->state_lock
);
1940 mlx4_en_stop_port(dev
, 0);
1941 netif_carrier_off(dev
);
1943 mutex_unlock(&mdev
->state_lock
);
1947 void mlx4_en_free_resources(struct mlx4_en_priv
*priv
)
1951 #ifdef CONFIG_RFS_ACCEL
1952 free_irq_cpu_rmap(priv
->dev
->rx_cpu_rmap
);
1953 priv
->dev
->rx_cpu_rmap
= NULL
;
1956 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
1957 if (priv
->tx_ring
&& priv
->tx_ring
[i
])
1958 mlx4_en_destroy_tx_ring(priv
, &priv
->tx_ring
[i
]);
1959 if (priv
->tx_cq
&& priv
->tx_cq
[i
])
1960 mlx4_en_destroy_cq(priv
, &priv
->tx_cq
[i
]);
1963 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
1964 if (priv
->rx_ring
[i
])
1965 mlx4_en_destroy_rx_ring(priv
, &priv
->rx_ring
[i
],
1966 priv
->prof
->rx_ring_size
, priv
->stride
);
1968 mlx4_en_destroy_cq(priv
, &priv
->rx_cq
[i
]);
1971 if (priv
->base_tx_qpn
) {
1972 mlx4_qp_release_range(priv
->mdev
->dev
, priv
->base_tx_qpn
, priv
->tx_ring_num
);
1973 priv
->base_tx_qpn
= 0;
1977 int mlx4_en_alloc_resources(struct mlx4_en_priv
*priv
)
1979 struct mlx4_en_port_profile
*prof
= priv
->prof
;
1984 err
= mlx4_qp_reserve_range(priv
->mdev
->dev
, priv
->tx_ring_num
, 256, &priv
->base_tx_qpn
);
1986 en_err(priv
, "failed reserving range for TX rings\n");
1990 /* Create tx Rings */
1991 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
1992 node
= cpu_to_node(i
% num_online_cpus());
1993 if (mlx4_en_create_cq(priv
, &priv
->tx_cq
[i
],
1994 prof
->tx_ring_size
, i
, TX
, node
))
1997 if (mlx4_en_create_tx_ring(priv
, &priv
->tx_ring
[i
],
1998 priv
->base_tx_qpn
+ i
,
1999 prof
->tx_ring_size
, TXBB_SIZE
,
2004 /* Create rx Rings */
2005 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
2006 node
= cpu_to_node(i
% num_online_cpus());
2007 if (mlx4_en_create_cq(priv
, &priv
->rx_cq
[i
],
2008 prof
->rx_ring_size
, i
, RX
, node
))
2011 if (mlx4_en_create_rx_ring(priv
, &priv
->rx_ring
[i
],
2012 prof
->rx_ring_size
, priv
->stride
,
2017 #ifdef CONFIG_RFS_ACCEL
2018 if (priv
->mdev
->dev
->caps
.comp_pool
) {
2019 priv
->dev
->rx_cpu_rmap
= alloc_irq_cpu_rmap(priv
->mdev
->dev
->caps
.comp_pool
);
2020 if (!priv
->dev
->rx_cpu_rmap
)
2028 en_err(priv
, "Failed to allocate NIC resources\n");
2029 for (i
= 0; i
< priv
->rx_ring_num
; i
++) {
2030 if (priv
->rx_ring
[i
])
2031 mlx4_en_destroy_rx_ring(priv
, &priv
->rx_ring
[i
],
2035 mlx4_en_destroy_cq(priv
, &priv
->rx_cq
[i
]);
2037 for (i
= 0; i
< priv
->tx_ring_num
; i
++) {
2038 if (priv
->tx_ring
[i
])
2039 mlx4_en_destroy_tx_ring(priv
, &priv
->tx_ring
[i
]);
2041 mlx4_en_destroy_cq(priv
, &priv
->tx_cq
[i
]);
2047 void mlx4_en_destroy_netdev(struct net_device
*dev
)
2049 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
2050 struct mlx4_en_dev
*mdev
= priv
->mdev
;
2052 en_dbg(DRV
, priv
, "Destroying netdev on port:%d\n", priv
->port
);
2054 /* Unregister device - this will close the port if it was up */
2055 if (priv
->registered
)
2056 unregister_netdev(dev
);
2058 if (priv
->allocated
)
2059 mlx4_free_hwq_res(mdev
->dev
, &priv
->res
, MLX4_EN_PAGE_SIZE
);
2061 cancel_delayed_work(&priv
->stats_task
);
2062 cancel_delayed_work(&priv
->service_task
);
2063 /* flush any pending task for this netdev */
2064 flush_workqueue(mdev
->workqueue
);
2066 /* Detach the netdev so tasks would not attempt to access it */
2067 mutex_lock(&mdev
->state_lock
);
2068 mdev
->pndev
[priv
->port
] = NULL
;
2069 mutex_unlock(&mdev
->state_lock
);
2071 mlx4_en_free_resources(priv
);
2073 kfree(priv
->tx_ring
);
2079 static int mlx4_en_change_mtu(struct net_device
*dev
, int new_mtu
)
2081 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
2082 struct mlx4_en_dev
*mdev
= priv
->mdev
;
2085 en_dbg(DRV
, priv
, "Change MTU called - current:%d new:%d\n",
2088 if ((new_mtu
< MLX4_EN_MIN_MTU
) || (new_mtu
> priv
->max_mtu
)) {
2089 en_err(priv
, "Bad MTU size:%d.\n", new_mtu
);
2094 if (netif_running(dev
)) {
2095 mutex_lock(&mdev
->state_lock
);
2096 if (!mdev
->device_up
) {
2097 /* NIC is probably restarting - let watchdog task reset
2099 en_dbg(DRV
, priv
, "Change MTU called with card down!?\n");
2101 mlx4_en_stop_port(dev
, 1);
2102 err
= mlx4_en_start_port(dev
);
2104 en_err(priv
, "Failed restarting port:%d\n",
2106 queue_work(mdev
->workqueue
, &priv
->watchdog_task
);
2109 mutex_unlock(&mdev
->state_lock
);
2114 static int mlx4_en_hwtstamp_set(struct net_device
*dev
, struct ifreq
*ifr
)
2116 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
2117 struct mlx4_en_dev
*mdev
= priv
->mdev
;
2118 struct hwtstamp_config config
;
2120 if (copy_from_user(&config
, ifr
->ifr_data
, sizeof(config
)))
2123 /* reserved for future extensions */
2127 /* device doesn't support time stamping */
2128 if (!(mdev
->dev
->caps
.flags2
& MLX4_DEV_CAP_FLAG2_TS
))
2131 /* TX HW timestamp */
2132 switch (config
.tx_type
) {
2133 case HWTSTAMP_TX_OFF
:
2134 case HWTSTAMP_TX_ON
:
2140 /* RX HW timestamp */
2141 switch (config
.rx_filter
) {
2142 case HWTSTAMP_FILTER_NONE
:
2144 case HWTSTAMP_FILTER_ALL
:
2145 case HWTSTAMP_FILTER_SOME
:
2146 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT
:
2147 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC
:
2148 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ
:
2149 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT
:
2150 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC
:
2151 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ
:
2152 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT
:
2153 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC
:
2154 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ
:
2155 case HWTSTAMP_FILTER_PTP_V2_EVENT
:
2156 case HWTSTAMP_FILTER_PTP_V2_SYNC
:
2157 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ
:
2158 config
.rx_filter
= HWTSTAMP_FILTER_ALL
;
2164 if (mlx4_en_timestamp_config(dev
, config
.tx_type
, config
.rx_filter
)) {
2165 config
.tx_type
= HWTSTAMP_TX_OFF
;
2166 config
.rx_filter
= HWTSTAMP_FILTER_NONE
;
2169 return copy_to_user(ifr
->ifr_data
, &config
,
2170 sizeof(config
)) ? -EFAULT
: 0;
2173 static int mlx4_en_hwtstamp_get(struct net_device
*dev
, struct ifreq
*ifr
)
2175 struct mlx4_en_priv
*priv
= netdev_priv(dev
);
2177 return copy_to_user(ifr
->ifr_data
, &priv
->hwtstamp_config
,
2178 sizeof(priv
->hwtstamp_config
)) ? -EFAULT
: 0;
2181 static int mlx4_en_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
2185 return mlx4_en_hwtstamp_set(dev
, ifr
);
2187 return mlx4_en_hwtstamp_get(dev
, ifr
);
2193 static int mlx4_en_set_features(struct net_device
*netdev
,
2194 netdev_features_t features
)
2196 struct mlx4_en_priv
*priv
= netdev_priv(netdev
);
2198 if (features
& NETIF_F_LOOPBACK
)
2199 priv
->ctrl_flags
|= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK
);
2202 cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK
);
2204 mlx4_en_update_loopback_state(netdev
, features
);
2210 static int mlx4_en_set_vf_mac(struct net_device
*dev
, int queue
, u8
*mac
)
2212 struct mlx4_en_priv
*en_priv
= netdev_priv(dev
);
2213 struct mlx4_en_dev
*mdev
= en_priv
->mdev
;
2214 u64 mac_u64
= mlx4_mac_to_u64(mac
);
2216 if (!is_valid_ether_addr(mac
))
2219 return mlx4_set_vf_mac(mdev
->dev
, en_priv
->port
, queue
, mac_u64
);
2222 static int mlx4_en_set_vf_vlan(struct net_device
*dev
, int vf
, u16 vlan
, u8 qos
)
2224 struct mlx4_en_priv
*en_priv
= netdev_priv(dev
);
2225 struct mlx4_en_dev
*mdev
= en_priv
->mdev
;
2227 return mlx4_set_vf_vlan(mdev
->dev
, en_priv
->port
, vf
, vlan
, qos
);
2230 static int mlx4_en_set_vf_spoofchk(struct net_device
*dev
, int vf
, bool setting
)
2232 struct mlx4_en_priv
*en_priv
= netdev_priv(dev
);
2233 struct mlx4_en_dev
*mdev
= en_priv
->mdev
;
2235 return mlx4_set_vf_spoofchk(mdev
->dev
, en_priv
->port
, vf
, setting
);
2238 static int mlx4_en_get_vf_config(struct net_device
*dev
, int vf
, struct ifla_vf_info
*ivf
)
2240 struct mlx4_en_priv
*en_priv
= netdev_priv(dev
);
2241 struct mlx4_en_dev
*mdev
= en_priv
->mdev
;
2243 return mlx4_get_vf_config(mdev
->dev
, en_priv
->port
, vf
, ivf
);
static int mlx4_en_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
	struct mlx4_en_priv *en_priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = en_priv->mdev;

	return mlx4_set_vf_link_state(mdev->dev, en_priv->port, vf, link_state);
}
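/* Report the firmware-assigned physical port id: the 64-bit value from the
 * device caps is copied into ppid->id most-significant byte first.
 */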
#define PORT_ID_BYTE_LEN 8
static int mlx4_en_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_port_id *ppid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int i;
	u64 phys_port_id = mdev->caps.phys_port_id[priv->port];

	if (!phys_port_id)
		return -EOPNOTSUPP;

	ppid->id_len = sizeof(phys_port_id);
	for (i = PORT_ID_BYTE_LEN - 1; i >= 0; --i) {
		ppid->id[i] = phys_port_id & 0xff;
		phys_port_id >>= 8;
	}
	return 0;
}
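/* VXLAN offload support: the ndo add/del port callbacks only record the
 * UDP port and queue work on the mdev workqueue; the work handlers below
 * issue the firmware commands and update the tunnel offload features.
 */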
#ifdef CONFIG_MLX4_EN_VXLAN
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_add_task);

	ret = mlx4_config_vxlan_port(priv->mdev->dev, priv->vxlan_port);
	if (ret)
		goto out;

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 1);
out:
	if (ret) {
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
		return;
	}

	/* set offloads */
	priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
				      NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
	priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	priv->dev->features    |= NETIF_F_GSO_UDP_TUNNEL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
{
	int ret;
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 vxlan_del_task);
	/* unset offloads */
	priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
	priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
	priv->dev->features    &= ~NETIF_F_GSO_UDP_TUNNEL;

	ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
				  VXLAN_STEER_BY_OUTER_MAC, 0);
	if (ret)
		en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);

	priv->vxlan_port = 0;
}
static void mlx4_en_add_vxlan_port(struct net_device *dev,
				   sa_family_t sa_family, __be16 port)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 current_port;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	if (sa_family == AF_INET6)
		return;

	current_port = priv->vxlan_port;
	if (current_port && current_port != port) {
		en_warn(priv, "vxlan port %d configured, can't add port %d\n",
			ntohs(current_port), ntohs(port));
		return;
	}

	priv->vxlan_port = port;
	queue_work(priv->mdev->workqueue, &priv->vxlan_add_task);
}
static void mlx4_en_del_vxlan_port(struct net_device *dev,
				   sa_family_t sa_family, __be16 port)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	__be16 current_port;

	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
		return;

	if (sa_family == AF_INET6)
		return;

	current_port = priv->vxlan_port;
	if (current_port != port) {
		en_dbg(DRV, priv, "vxlan port %d isn't configured, ignoring\n", ntohs(port));
		return;
	}

	queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	return vxlan_features_check(skb, features);
}
#endif
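/* Two ndo tables are used: mlx4_netdev_ops for ordinary functions, and
 * mlx4_netdev_ops_master, which additionally exposes the SR-IOV VF
 * callbacks, for a device acting as multi-function master.
 */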
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_do_ioctl		= mlx4_en_ioctl,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= mlx4_en_low_latency_recv,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
#ifdef CONFIG_MLX4_EN_VXLAN
	.ndo_add_vxlan_port	= mlx4_en_add_vxlan_port,
	.ndo_del_vxlan_port	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
#endif
};
static const struct net_device_ops mlx4_netdev_ops_master = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= mlx4_en_set_vf_mac,
	.ndo_set_vf_vlan	= mlx4_en_set_vf_vlan,
	.ndo_set_vf_spoofchk	= mlx4_en_set_vf_spoofchk,
	.ndo_set_vf_link_state	= mlx4_en_set_vf_link_state,
	.ndo_get_vf_config	= mlx4_en_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_get_phys_port_id	= mlx4_en_get_phys_port_id,
#ifdef CONFIG_MLX4_EN_VXLAN
	.ndo_add_vxlan_port	= mlx4_en_add_vxlan_port,
	.ndo_del_vxlan_port	= mlx4_en_del_vxlan_port,
	.ndo_features_check	= mlx4_en_features_check,
#endif
};
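/* Allocate and initialize one Ethernet netdev per mlx4 port: private data,
 * rings, work items, feature flags and the initial port configuration,
 * then register the device.
 */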
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;
	u64 mac_u64;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_port = port - 1;

	/*
	 * Initialize driver private data
	 */

	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
			MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;

	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->cqe_size = mdev->dev->caps.cqe_size;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_MLX4_EN_VXLAN
	INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
	INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
#endif
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "enabling only PFC DCB ops\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		if (mlx4_is_slave(priv->mdev->dev)) {
			eth_hw_addr_random(dev);
			en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
			mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
			mdev->dev->caps.def_mac[priv->port] = mac_u64;
		} else {
			en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n",
			       priv->port, dev->dev_addr);
			err = -EINVAL;
			goto out;
		}
	}

	memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Initialize time stamping config */
	priv->hwtstamp_config.flags = 0;
	priv->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
	priv->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	if (mlx4_is_master(priv->mdev->dev))
		dev->netdev_ops = &mlx4_netdev_ops_master;
	else
		dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	dev->ethtool_ops = &mlx4_en_ethtool_ops;

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_VLAN_CTAG_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;
	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	mlx4_en_set_default_moderation(priv);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC, 1);
		if (err) {
			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
			       err);
			goto out;
		}
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);

	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}