// SPDX-License-Identifier: GPL-2.0-only
/* aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_nic.c: Definition of common code for NIC. */

#include "aq_nic.h"
#include "aq_ring.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "aq_macsec.h"
#include "aq_main.h"
#include "aq_phy.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/ip.h>
static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO;
module_param_named(aq_itr, aq_itr, uint, 0644);
MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode");

static unsigned int aq_itr_tx;
module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644);
MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate");

static unsigned int aq_itr_rx;
module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644);
MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate");
static void aq_nic_update_ndev_stats(struct aq_nic_s *self);

static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
{
	static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = {
		0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
		0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
		0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
		0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
		0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
	};
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	struct aq_rss_parameters *rss_params;
	int i = 0;

	rss_params = &cfg->aq_rss;

	rss_params->hash_secret_key_size = sizeof(rss_key);
	memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
	rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i & (num_rss_queues - 1);
}
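/* Note: the indirection table fill above uses i & (num_rss_queues - 1) as a
 * cheap modulo, which is only correct when num_rss_queues is a power of two.
 * aq_nic_cfg_start() below guarantees that by rounding cfg->vecs down to a
 * power of two before deriving num_rss_queues from it.
 */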
/* Checks hw_caps and 'corrects' aq_nic_cfg in runtime */
void aq_nic_cfg_start(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	cfg->tcs = AQ_CFG_TCS_DEF;

	cfg->is_polling = AQ_CFG_IS_POLLING_DEF;

	cfg->itr = aq_itr;
	cfg->tx_itr = aq_itr_tx;
	cfg->rx_itr = aq_itr_rx;

	cfg->rxpageorder = AQ_CFG_RX_PAGEORDER;
	cfg->is_rss = AQ_CFG_IS_RSS_DEF;
	cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
	cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
	cfg->fc.req = AQ_CFG_FC_MODE;
	cfg->wol = AQ_CFG_WOL_MODES;

	cfg->mtu = AQ_CFG_MTU_DEF;
	cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
	cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;

	cfg->is_lro = AQ_CFG_IS_LRO_DEF;

	/* descriptors */
	cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF);
	cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF);

	/* rss rings */
	cfg->vecs = min(cfg->aq_hw_caps->vecs, AQ_CFG_VECS_DEF);
	cfg->vecs = min(cfg->vecs, num_online_cpus());
	if (self->irqvecs > AQ_HW_SERVICE_IRQS)
		cfg->vecs = min(cfg->vecs, self->irqvecs - AQ_HW_SERVICE_IRQS);
	/* cfg->vecs should be power of 2 for RSS */
	if (cfg->vecs >= 8U)
		cfg->vecs = 8U;
	else if (cfg->vecs >= 4U)
		cfg->vecs = 4U;
	else if (cfg->vecs >= 2U)
		cfg->vecs = 2U;
	else
		cfg->vecs = 1U;

	cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF);

	aq_nic_rss_init(self, cfg->num_rss_queues);

	cfg->irq_type = aq_pci_func_get_irq_type(self);

	if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
	    (cfg->aq_hw_caps->vecs == 1U) ||
	    (cfg->vecs == 1U)) {
		cfg->is_rss = 0U;
		cfg->vecs = 1U;
	}

	/* Check if we have enough vectors allocated for
	 * link status IRQ. If no - we'll know link state from
	 * slower service task.
	 */
	if (AQ_HW_SERVICE_IRQS > 0 && cfg->vecs + 1 <= self->irqvecs)
		cfg->link_irq_vec = cfg->vecs;
	else
		cfg->link_irq_vec = 0;

	cfg->link_speed_msk &= cfg->aq_hw_caps->link_speed_msk;
	cfg->features = cfg->aq_hw_caps->hw_features;
	cfg->is_vlan_rx_strip = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_RX);
	cfg->is_vlan_tx_insert = !!(cfg->features & NETIF_F_HW_VLAN_CTAG_TX);
	cfg->is_vlan_force_promisc = true;
}
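/* Resulting vector layout: MSI-X vectors 0..cfg->vecs-1 service the TX/RX
 * rings; when enough vectors were allocated, the link status interrupt gets
 * its own vector right after them (cfg->link_irq_vec == cfg->vecs), otherwise
 * link_irq_vec stays 0 and link state is picked up by the service task.
 */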
static int aq_nic_update_link_status(struct aq_nic_s *self)
{
	int err = self->aq_fw_ops->update_link_status(self->aq_hw);
	u32 fc = 0;

	if (err)
		return err;

	if (self->aq_fw_ops->get_flow_control)
		self->aq_fw_ops->get_flow_control(self->aq_hw, &fc);
	self->aq_nic_cfg.fc.cur = fc;

	if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) {
		netdev_info(self->ndev, "%s: link change old %d new %d\n",
			    AQ_CFG_DRV_NAME, self->link_status.mbps,
			    self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		if (self->aq_ptp) {
			aq_ptp_clock_init(self);
			aq_ptp_tm_offset_set(self,
					     self->aq_hw->aq_link_status.mbps);
			aq_ptp_link_change(self);
		}

		/* Driver has to update flow control settings on RX block
		 * on any link event.
		 * We should query FW whether it negotiated FC.
		 */
		if (self->aq_hw_ops->hw_set_fc)
			self->aq_hw_ops->hw_set_fc(self->aq_hw, fc, 0);
	}

	self->link_status = self->aq_hw->aq_link_status;
	if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
		aq_utils_obj_set(&self->flags,
				 AQ_NIC_FLAG_STARTED);
		aq_utils_obj_clear(&self->flags,
				   AQ_NIC_LINK_DOWN);
		netif_carrier_on(self->ndev);
#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_enable(self);
#endif
		netif_tx_wake_all_queues(self->ndev);
	}
	if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
		netif_carrier_off(self->ndev);
		netif_tx_disable(self->ndev);
		aq_utils_obj_set(&self->flags, AQ_NIC_LINK_DOWN);
	}

	return 0;
}
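/* The link state handler below is registered through request_threaded_irq()
 * with no hard handler (see aq_nic_start()), so it runs in a sleepable IRQ
 * thread: refreshing the link status goes through firmware requests that
 * may block.
 */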
static irqreturn_t aq_linkstate_threaded_isr(int irq, void *private)
{
	struct aq_nic_s *self = private;

	if (!self)
		return IRQ_RETVAL(0);

	aq_nic_update_link_status(self);

	self->aq_hw_ops->hw_irq_enable(self->aq_hw,
				       BIT(self->aq_nic_cfg.link_irq_vec));

	return IRQ_HANDLED;
}
static void aq_nic_service_task(struct work_struct *work)
{
	struct aq_nic_s *self = container_of(work, struct aq_nic_s,
					     service_task);
	int err;

	aq_ptp_service_task(self);

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

	err = aq_nic_update_link_status(self);
	if (err)
		return;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_work(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	if (self->aq_fw_ops->update_stats)
		self->aq_fw_ops->update_stats(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);

	aq_nic_update_ndev_stats(self);
}
static void aq_nic_service_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, service_timer);

	mod_timer(&self->service_timer,
		  jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);

	aq_ndev_schedule_work(&self->service_task);
}
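/* The timer callback only rearms itself and queues the service task; the
 * actual firmware polling runs in process context, where it is allowed to
 * sleep on fwreq_mutex.
 */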
static void aq_nic_polling_timer_cb(struct timer_list *t)
{
	struct aq_nic_s *self = from_timer(self, t, polling_timer);
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_isr(i, (void *)aq_vec);

	mod_timer(&self->polling_timer, jiffies +
		  AQ_CFG_POLLING_TIMER_INTERVAL);
}
int aq_nic_ndev_register(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev) {
		err = -EINVAL;
		goto err_exit;
	}

	err = hw_atl_utils_initfw(self->aq_hw, &self->aq_fw_ops);
	if (err)
		goto err_exit;

#if IS_ENABLED(CONFIG_MACSEC)
	aq_macsec_init(self);
#endif

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
						 self->ndev->dev_addr);
	mutex_unlock(&self->fwreq_mutex);
	if (err)
		goto err_exit;

#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
	{
		static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;

		ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
	}
#endif

	for (self->aq_vecs = 0; self->aq_vecs < aq_nic_get_cfg(self)->vecs;
	     self->aq_vecs++) {
		self->aq_vec[self->aq_vecs] =
		    aq_vec_alloc(self, self->aq_vecs, aq_nic_get_cfg(self));
		if (!self->aq_vec[self->aq_vecs]) {
			err = -ENOMEM;
			goto err_exit;
		}
	}

	netif_carrier_off(self->ndev);

	netif_tx_disable(self->ndev);

	err = register_netdev(self->ndev);
	if (err)
		goto err_exit;

err_exit:
#if IS_ENABLED(CONFIG_MACSEC)
	if (err)
		aq_macsec_free(self);
#endif
	return err;
}
void aq_nic_ndev_init(struct aq_nic_s *self)
{
	const struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
	struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;

	self->ndev->hw_features |= aq_hw_caps->hw_features;
	self->ndev->features = aq_hw_caps->hw_features;
	self->ndev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				     NETIF_F_RXHASH | NETIF_F_SG |
				     NETIF_F_LRO | NETIF_F_TSO;
	self->ndev->gso_partial_features = NETIF_F_GSO_UDP_L4;
	self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
	self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	self->msg_enable = NETIF_MSG_DRV | NETIF_MSG_LINK;
	self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
	self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
}
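/* MTU accounting: cfg->mtu holds the full L2 frame budget, so the netdev MTU
 * excludes the Ethernet header; max_mtu additionally reserves room for the
 * FCS within the hardware frame limit (aq_hw_caps->mtu).
 */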
void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
			struct aq_ring_s *ring)
{
	self->aq_ring_tx[idx] = ring;
}

struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
{
	return self->ndev;
}
int aq_nic_init(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->power_state = AQ_HW_POWER_STATE_D0;
	mutex_lock(&self->fwreq_mutex);
	err = self->aq_hw_ops->hw_reset(self->aq_hw);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_init(self->aq_hw,
				       aq_nic_get_ndev(self)->dev_addr);
	if (err < 0)
		goto err_exit;

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
		self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
		err = aq_phy_init(self->aq_hw);
	}

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_init(aq_vec, self->aq_hw_ops, self->aq_hw);

	err = aq_ptp_init(self, self->irqvecs - 1);
	if (err < 0)
		goto err_exit;

	err = aq_ptp_ring_alloc(self);
	if (err < 0)
		goto err_exit;

	err = aq_ptp_ring_init(self);
	if (err < 0)
		goto err_exit;

	netif_carrier_off(self->ndev);

err_exit:
	return err;
}
int aq_nic_start(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;
	int err = 0;

	err = self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
						     self->mc_list.ar,
						     self->mc_list.count);
	if (err < 0)
		goto err_exit;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
						    self->packet_filter);
	if (err < 0)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		err = aq_vec_start(aq_vec);
		if (err < 0)
			goto err_exit;
	}

	err = aq_ptp_ring_start(self);
	if (err < 0)
		goto err_exit;

	aq_nic_set_loopback(self);

	err = self->aq_hw_ops->hw_start(self->aq_hw);
	if (err < 0)
		goto err_exit;

	err = aq_nic_update_interrupt_moderation_settings(self);
	if (err)
		goto err_exit;

	INIT_WORK(&self->service_task, aq_nic_service_task);

	timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
	aq_nic_service_timer_cb(&self->service_timer);

	if (self->aq_nic_cfg.is_polling) {
		timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
		mod_timer(&self->polling_timer, jiffies +
			  AQ_CFG_POLLING_TIMER_INTERVAL);
	} else {
		for (i = 0U, aq_vec = self->aq_vec[0];
		     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
			err = aq_pci_func_alloc_irq(self, i, self->ndev->name,
						    aq_vec_isr, aq_vec,
						    aq_vec_get_affinity_mask(aq_vec));
			if (err < 0)
				goto err_exit;
		}

		err = aq_ptp_irq_alloc(self);
		if (err < 0)
			goto err_exit;

		if (self->aq_nic_cfg.link_irq_vec) {
			int irqvec = pci_irq_vector(self->pdev,
						    self->aq_nic_cfg.link_irq_vec);
			err = request_threaded_irq(irqvec, NULL,
						   aq_linkstate_threaded_isr,
						   IRQF_SHARED | IRQF_ONESHOT,
						   self->ndev->name, self);
			if (err < 0)
				goto err_exit;
			self->msix_entry_mask |= (1 << self->aq_nic_cfg.link_irq_vec);
		}

		err = self->aq_hw_ops->hw_irq_enable(self->aq_hw,
						     AQ_CFG_IRQ_MASK);
		if (err < 0)
			goto err_exit;
	}

	err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
	if (err < 0)
		goto err_exit;

	netif_tx_start_all_queues(self->ndev);

err_exit:
	return err;
}
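/* aq_nic_map_skb() below fills ring->buff_ring starting at sw_tail: an
 * optional context descriptor (GSO and/or VLAN tag offload), the DMA-mapped
 * linear head, then one descriptor per AQ_CFG_TX_FRAME_MAX-sized chunk of
 * each page fragment.  It returns the number of descriptors consumed, or 0
 * on a DMA mapping failure (after unwinding the mappings already created).
 */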
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	struct aq_ring_buff_s *first = NULL;
	u8 ipver = ip_hdr(skb)->version;
	struct aq_ring_buff_s *dx_buff;
	bool need_context_tag = false;
	unsigned int frag_count = 0U;
	unsigned int ret = 0U;
	unsigned int dx;
	u8 l4proto = 0;

	if (ipver == 4)
		l4proto = ip_hdr(skb)->protocol;
	else if (ipver == 6)
		l4proto = ipv6_hdr(skb)->nexthdr;

	dx = ring->sw_tail;
	dx_buff = &ring->buff_ring[dx];
	dx_buff->flags = 0U;

	if (unlikely(skb_is_gso(skb))) {
		dx_buff->mss = skb_shinfo(skb)->gso_size;
		if (l4proto == IPPROTO_TCP) {
			dx_buff->is_gso_tcp = 1U;
			dx_buff->len_l4 = tcp_hdrlen(skb);
		} else if (l4proto == IPPROTO_UDP) {
			dx_buff->is_gso_udp = 1U;
			dx_buff->len_l4 = sizeof(struct udphdr);
			/* UDP GSO Hardware does not replace packet length. */
			udp_hdr(skb)->len = htons(dx_buff->mss +
						  dx_buff->len_l4);
		} else {
			WARN_ONCE(true, "Bad GSO mode");
			goto exit;
		}
		dx_buff->len_pkt = skb->len;
		dx_buff->len_l2 = ETH_HLEN;
		dx_buff->len_l3 = skb_network_header_len(skb);
		dx_buff->eop_index = 0xffffU;
		dx_buff->is_ipv6 = (ipver == 6);
		need_context_tag = true;
	}

	if (self->aq_nic_cfg.is_vlan_tx_insert && skb_vlan_tag_present(skb)) {
		dx_buff->vlan_tx_tag = skb_vlan_tag_get(skb);
		dx_buff->len_pkt = skb->len;
		dx_buff->is_vlan = 1U;
		need_context_tag = true;
	}

	if (need_context_tag) {
		dx = aq_ring_next_dx(ring, dx);
		dx_buff = &ring->buff_ring[dx];
		dx_buff->flags = 0U;
		++ret;
	}

	dx_buff->len = skb_headlen(skb);
	dx_buff->pa = dma_map_single(aq_nic_get_dev(self),
				     skb->data,
				     dx_buff->len,
				     DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
		ret = 0;
		goto exit;
	}

	first = dx_buff;
	dx_buff->len_pkt = skb->len;
	dx_buff->is_sop = 1U;
	dx_buff->is_mapped = 1U;
	++ret;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		dx_buff->is_ip_cso = (htons(ETH_P_IP) == skb->protocol);
		dx_buff->is_tcp_cso = (l4proto == IPPROTO_TCP);
		dx_buff->is_udp_cso = (l4proto == IPPROTO_UDP);
	}

	for (; nr_frags--; ++frag_count) {
		unsigned int frag_len = 0U;
		unsigned int buff_offset = 0U;
		unsigned int buff_size = 0U;
		dma_addr_t frag_pa;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];

		frag_len = skb_frag_size(frag);

		while (frag_len) {
			if (frag_len > AQ_CFG_TX_FRAME_MAX)
				buff_size = AQ_CFG_TX_FRAME_MAX;
			else
				buff_size = frag_len;

			frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
						   frag,
						   buff_offset,
						   buff_size,
						   DMA_TO_DEVICE);

			if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
						       frag_pa)))
				goto mapping_error;

			dx = aq_ring_next_dx(ring, dx);
			dx_buff = &ring->buff_ring[dx];

			dx_buff->flags = 0U;
			dx_buff->len = buff_size;
			dx_buff->pa = frag_pa;
			dx_buff->is_mapped = 1U;
			dx_buff->eop_index = 0xffffU;

			frag_len -= buff_size;
			buff_offset += buff_size;

			++ret;
		}
	}

	first->eop_index = dx;
	dx_buff->is_eop = 1U;
	goto exit;

mapping_error:
	for (dx = ring->sw_tail;
	     ret > 0;
	     --ret, dx = aq_ring_next_dx(ring, dx)) {
		dx_buff = &ring->buff_ring[dx];

		if (!(dx_buff->is_gso_tcp || dx_buff->is_gso_udp) &&
		    !dx_buff->is_vlan && dx_buff->pa) {
			if (unlikely(dx_buff->is_sop)) {
				dma_unmap_single(aq_nic_get_dev(self),
						 dx_buff->pa,
						 dx_buff->len,
						 DMA_TO_DEVICE);
			} else {
				dma_unmap_page(aq_nic_get_dev(self),
					       dx_buff->pa,
					       dx_buff->len,
					       DMA_TO_DEVICE);
			}
		}
	}

exit:
	return ret;
}
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
{
	unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
	struct aq_ring_s *ring = NULL;
	unsigned int frags = 0U;
	int err = NETDEV_TX_OK;
	unsigned int tc = 0U;

	frags = skb_shinfo(skb)->nr_frags + 1;

	ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];

	if (frags > AQ_CFG_SKB_FRAGS_MAX) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	aq_ring_update_queue_state(ring);

	if (self->aq_nic_cfg.priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	/* Above status update may stop the queue. Check this. */
	if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
		err = NETDEV_TX_BUSY;
		goto err_exit;
	}

	frags = aq_nic_map_skb(self, skb, ring);

	if (likely(frags)) {
		err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
						       ring, frags);
	} else {
		err = NETDEV_TX_BUSY;
	}

err_exit:
	return err;
}
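/* Returning NETDEV_TX_BUSY makes the core requeue the skb and retry later;
 * the skb is freed outright (dev_kfree_skb_any) only when it carries more
 * fragments than AQ_CFG_SKB_FRAGS_MAX and could never be transmitted.
 */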
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self)
{
	return self->aq_hw_ops->hw_interrupt_moderation_set(self->aq_hw);
}
int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
{
	int err = 0;

	err = self->aq_hw_ops->hw_packet_filter_set(self->aq_hw, flags);
	if (err < 0)
		goto err_exit;

	self->packet_filter = flags;

err_exit:
	return err;
}
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
	const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
	unsigned int packet_filter = ndev->flags;
	struct netdev_hw_addr *ha = NULL;
	unsigned int i = 0U;
	int err = 0;

	self->mc_list.count = 0;
	if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
		packet_filter |= IFF_PROMISC;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			ether_addr_copy(self->mc_list.ar[i++], ha->addr);
		}
	}

	cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
	if (cfg->is_mc_list_enabled) {
		if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
			packet_filter |= IFF_ALLMULTI;
		} else {
			netdev_for_each_mc_addr(ha, ndev) {
				ether_addr_copy(self->mc_list.ar[i++],
						ha->addr);
			}
		}
	}

	if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
		self->mc_list.count = i;
		err = hw_ops->hw_multicast_list_set(self->aq_hw,
						    self->mc_list.ar,
						    self->mc_list.count);
		if (err < 0)
			return err;
	}

	return aq_nic_set_packet_filter(self, packet_filter);
}
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
{
	self->aq_nic_cfg.mtu = new_mtu;

	return 0;
}

int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
{
	return self->aq_hw_ops->hw_set_mac_address(self->aq_hw,
						   ndev->dev_addr);
}

unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
{
	return self->link_status.mbps;
}
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
{
	u32 *regs_buff = p;
	int err = 0;

	regs->version = 1;

	err = self->aq_hw_ops->hw_get_regs(self->aq_hw,
					   self->aq_nic_cfg.aq_hw_caps,
					   regs_buff);
	if (err < 0)
		goto err_exit;

err_exit:
	return err;
}

int aq_nic_get_regs_count(struct aq_nic_s *self)
{
	return self->aq_nic_cfg.aq_hw_caps->mac_regs_count;
}
u64 *aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
{
	struct aq_vec_s *aq_vec = NULL;
	struct aq_stats_s *stats;
	unsigned int count = 0U;
	unsigned int i = 0U;

	if (self->aq_fw_ops->update_stats) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->update_stats(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}
	stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	if (!stats)
		goto err_exit;

	data[i] = stats->uprc + stats->mprc + stats->bprc;
	data[++i] = stats->uprc;
	data[++i] = stats->mprc;
	data[++i] = stats->bprc;
	data[++i] = stats->erpt;
	data[++i] = stats->uptc + stats->mptc + stats->bptc;
	data[++i] = stats->uptc;
	data[++i] = stats->mptc;
	data[++i] = stats->bptc;
	data[++i] = stats->ubrc;
	data[++i] = stats->ubtc;
	data[++i] = stats->mbrc;
	data[++i] = stats->mbtc;
	data[++i] = stats->bbrc;
	data[++i] = stats->bbtc;
	data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
	data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
	data[++i] = stats->dma_pkt_rc;
	data[++i] = stats->dma_pkt_tc;
	data[++i] = stats->dma_oct_rc;
	data[++i] = stats->dma_oct_tc;
	data[++i] = stats->dpc;

	i++;

	data += i;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     aq_vec && self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
		data += count;
		aq_vec_get_sw_stats(aq_vec, data, &count);
	}

	data += count;

err_exit:;
	return data;
}
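/* The fixed-order hardware counters above, followed by the per-vector
 * software counters, define the layout of the ethtool statistics blob; it
 * has to stay in step with the corresponding stat-name string table on the
 * ethtool side of the driver (aq_ethtool.c).
 */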
static void aq_nic_update_ndev_stats(struct aq_nic_s *self)
{
	struct aq_stats_s *stats = self->aq_hw_ops->hw_get_hw_stats(self->aq_hw);
	struct net_device *ndev = self->ndev;

	ndev->stats.rx_packets = stats->dma_pkt_rc;
	ndev->stats.rx_bytes = stats->dma_oct_rc;
	ndev->stats.rx_errors = stats->erpr;
	ndev->stats.rx_dropped = stats->dpc;
	ndev->stats.tx_packets = stats->dma_pkt_tc;
	ndev->stats.tx_bytes = stats->dma_oct_tc;
	ndev->stats.tx_errors = stats->erpt;
	ndev->stats.multicast = stats->mprc;
}
void aq_nic_get_link_ksettings(struct aq_nic_s *self,
			       struct ethtool_link_ksettings *cmd)
{
	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		cmd->base.port = PORT_FIBRE;
	else
		cmd->base.port = PORT_TP;
	/* This driver supports only 10G capable adapters, so DUPLEX_FULL */
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     1000baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);

	if (self->aq_nic_cfg.aq_hw_caps->flow_control) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Pause);
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     Asym_Pause);
	}

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);

	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	if (self->aq_nic_cfg.is_autoneg)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_5G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     5000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_2GS)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     2500baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     1000baseT_Full);

	if (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);

	if (self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX)
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Pause);

	/* Asym is when either RX or TX, but not both */
	if (!!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_TX) ^
	    !!(self->aq_nic_cfg.fc.cur & AQ_NIC_FC_RX))
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);

	if (self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_FIBRE)
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
	else
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
}
int aq_nic_set_link_ksettings(struct aq_nic_s *self,
			      const struct ethtool_link_ksettings *cmd)
{
	u32 speed = 0U;
	u32 rate = 0U;
	int err = 0;

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		rate = self->aq_nic_cfg.aq_hw_caps->link_speed_msk;
		self->aq_nic_cfg.is_autoneg = true;
	} else {
		speed = cmd->base.speed;

		switch (speed) {
		case SPEED_100:
			rate = AQ_NIC_RATE_100M;
			break;

		case SPEED_1000:
			rate = AQ_NIC_RATE_1G;
			break;

		case SPEED_2500:
			rate = AQ_NIC_RATE_2GS;
			break;

		case SPEED_5000:
			rate = AQ_NIC_RATE_5G;
			break;

		case SPEED_10000:
			rate = AQ_NIC_RATE_10G;
			break;

		default:
			err = -1;
			goto err_exit;
		}
		if (!(self->aq_nic_cfg.aq_hw_caps->link_speed_msk & rate)) {
			err = -1;
			goto err_exit;
		}

		self->aq_nic_cfg.is_autoneg = false;
	}

	mutex_lock(&self->fwreq_mutex);
	err = self->aq_fw_ops->set_link_speed(self->aq_hw, rate);
	mutex_unlock(&self->fwreq_mutex);
	if (err < 0)
		goto err_exit;

	self->aq_nic_cfg.link_speed_msk = rate;

err_exit:
	return err;
}
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
{
	return &self->aq_nic_cfg;
}

u32 aq_nic_get_fw_version(struct aq_nic_s *self)
{
	u32 fw_version = 0U;

	self->aq_hw_ops->hw_get_fw_version(self->aq_hw, &fw_version);

	return fw_version;
}
int aq_nic_set_loopback(struct aq_nic_s *self)
{
	struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;

	if (!self->aq_hw_ops->hw_set_loopback ||
	    !self->aq_fw_ops->set_phyloopback)
		return -EOPNOTSUPP;

	mutex_lock(&self->fwreq_mutex);
	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PKT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PKT_SYS)));

	self->aq_hw_ops->hw_set_loopback(self->aq_hw,
					 AQ_HW_LOOPBACK_DMA_NET,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_DMA_NET)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYINT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYINT_SYS)));

	self->aq_fw_ops->set_phyloopback(self->aq_hw,
					 AQ_HW_LOOPBACK_PHYEXT_SYS,
					 !!(cfg->priv_flags &
					    BIT(AQ_HW_LOOPBACK_PHYEXT_SYS)));
	mutex_unlock(&self->fwreq_mutex);

	return 0;
}
int aq_nic_stop(struct aq_nic_s *self)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	netif_tx_disable(self->ndev);
	netif_carrier_off(self->ndev);

	del_timer_sync(&self->service_timer);
	cancel_work_sync(&self->service_task);

	self->aq_hw_ops->hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);

	if (self->aq_nic_cfg.is_polling)
		del_timer_sync(&self->polling_timer);
	else
		aq_pci_func_free_irqs(self);

	aq_ptp_irq_free(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

	aq_ptp_ring_stop(self);

	return self->aq_hw_ops->hw_stop(self->aq_hw);
}
void aq_nic_set_power(struct aq_nic_s *self)
{
	if (self->power_state != AQ_HW_POWER_STATE_D0 ||
	    self->aq_hw->aq_nic_cfg->wol)
		if (likely(self->aq_fw_ops->set_power)) {
			mutex_lock(&self->fwreq_mutex);
			self->aq_fw_ops->set_power(self->aq_hw,
						   self->power_state,
						   self->ndev->dev_addr);
			mutex_unlock(&self->fwreq_mutex);
		}
}
void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
{
	struct aq_vec_s *aq_vec = NULL;
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_deinit(aq_vec);

	aq_ptp_unregister(self);
	aq_ptp_ring_deinit(self);
	aq_ptp_ring_free(self);
	aq_ptp_free(self);

	if (likely(self->aq_fw_ops->deinit) && link_down) {
		mutex_lock(&self->fwreq_mutex);
		self->aq_fw_ops->deinit(self->aq_hw);
		mutex_unlock(&self->fwreq_mutex);
	}

err_exit:;
}
void aq_nic_free_vectors(struct aq_nic_s *self)
{
	unsigned int i = 0U;

	if (!self)
		goto err_exit;

	for (i = ARRAY_SIZE(self->aq_vec); i--;) {
		if (self->aq_vec[i]) {
			aq_vec_free(self->aq_vec[i]);
			self->aq_vec[i] = NULL;
		}
	}

err_exit:;
}
void aq_nic_shutdown(struct aq_nic_s *self)
{
	int err = 0;

	if (!self->ndev)
		return;

	rtnl_lock();

	netif_device_detach(self->ndev);

	if (netif_running(self->ndev)) {
		err = aq_nic_stop(self);
		if (err < 0)
			goto err_exit;
	}
	aq_nic_deinit(self, !self->aq_hw->aq_nic_cfg->wol);
	aq_nic_set_power(self);

err_exit:
	rtnl_unlock();
}
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type)
{
	u8 location = 0xFF;
	u32 fltr_cnt;
	u32 n_bit;

	switch (type) {
	case aq_rx_filter_ethertype:
		location = AQ_RX_LAST_LOC_FETHERT - AQ_RX_FIRST_LOC_FETHERT -
			   self->aq_hw_rx_fltrs.fet_reserved_count;
		self->aq_hw_rx_fltrs.fet_reserved_count++;
		break;
	case aq_rx_filter_l3l4:
		fltr_cnt = AQ_RX_LAST_LOC_FL3L4 - AQ_RX_FIRST_LOC_FL3L4;
		n_bit = fltr_cnt - self->aq_hw_rx_fltrs.fl3l4.reserved_count;

		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 |= BIT(n_bit);
		self->aq_hw_rx_fltrs.fl3l4.reserved_count++;
		location = n_bit;
		break;
	default:
		break;
	}

	return location;
}
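/* Allocation strategy: ethertype filters are handed out from the top of
 * their location range downwards via a simple reservation count, while
 * L3/L4 filters additionally record their slot in the active_ipv4 bitmask
 * so that aq_nic_release_filter() below can clear the exact bit again.
 */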
void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
			   u32 location)
{
	switch (type) {
	case aq_rx_filter_ethertype:
		self->aq_hw_rx_fltrs.fet_reserved_count--;
		break;
	case aq_rx_filter_l3l4:
		self->aq_hw_rx_fltrs.fl3l4.reserved_count--;
		self->aq_hw_rx_fltrs.fl3l4.active_ipv4 &= ~BIT(location);
		break;
	default:
		break;
	}
}