// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static int iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);
char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
	"Intel(R) Ethernet Adaptive Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 3
#define DRV_VERSION_MINOR 2
#define DRV_VERSION_BUILD 3
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) \
	     DRV_KERN
const char iavf_driver_version[] = DRV_VERSION;
static const char iavf_copyright[] =
	"Copyright (c) 2013 - 2018 Intel Corporation.";
/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
	{PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
					 struct iavf_dma_mem *mem,
					 u64 size, u32 alignment)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}
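/* Note: the *_d helpers in this block are the OS-glue callbacks invoked by
 * the shared admin-queue code. The DMA variant hands out coherent memory so
 * descriptor rings and command buffers can be shared with the device
 * without explicit cache synchronization on each access.
 */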
/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
				     struct iavf_dma_mem *mem)
{
	struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

	if (!mem || !mem->va)
		return IAVF_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}
/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
					  struct iavf_virt_mem *mem, u32 size)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return IAVF_ERR_NO_MEMORY;
}
/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
				      struct iavf_virt_mem *mem)
{
	if (!mem)
		return IAVF_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}
/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
	if (!(adapter->flags &
	      (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= IAVF_FLAG_RESET_NEEDED;
		queue_work(iavf_wq, &adapter->reset_task);
	}
}
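/* The flag test above makes reset scheduling idempotent: if a reset is
 * already pending or needed, a second call is a no-op, so hot-path callers
 * such as iavf_tx_timeout() can invoke this without any further tracking.
 */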
/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	iavf_schedule_reset(adapter);
}
/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

	iavf_flush(hw);

	synchronize_irq(adapter->msix_entries[0].vector);
}
/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;

	wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
				       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

	iavf_flush(hw);
}
/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
	int i;
	struct iavf_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	iavf_flush(hw);
}
/**
 * iavf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter, u32 mask)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
			     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
			     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
		}
	}
}
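/* MSI-X vector 0 is reserved for the admin-queue ("misc") interrupt, so
 * queue vectors start at index 1; bit (i - 1) of @mask selects queue
 * vector i, matching the IAVF_VFINT_DYN_CTLN1(i - 1) register indexing
 * used in the loop above.
 */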
/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
	struct iavf_hw *hw = &adapter->hw;

	iavf_misc_irq_enable(adapter);
	iavf_irq_enable_queues(adapter, ~0);

	if (flush)
		iavf_flush(hw);
}
/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;

	/* handle non-queue interrupts, these reads clear the registers */
	rd32(hw, IAVF_VFINT_ICR01);
	rd32(hw, IAVF_VFINT_ICR0_ENA1);

	/* schedule work on the private workqueue */
	queue_work(iavf_wq, &adapter->adminq_task);

	return IRQ_HANDLED;
}
/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
	struct iavf_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct iavf_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
	     q_vector->rx.current_itr >> 1);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
}
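/* The ITR value is shifted right by one before the register write because
 * the hardware interrupt throttle registers on this device family are
 * programmed in 2-usec units, while the driver tracks ITR in 1-usec units.
 */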
/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
	struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct iavf_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
	q_vector->num_ringpairs++;
	wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
	     q_vector->tx.target_itr >> 1);
	q_vector->tx.current_itr = q_vector->tx.target_itr;
}
/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible. You would add new
 * mapping configurations in here.
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
	int rings_remaining = adapter->num_active_queues;
	int ridx = 0, vidx = 0;
	int q_vectors;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (; ridx < rings_remaining; ridx++) {
		iavf_map_vector_to_rxq(adapter, vidx, ridx);
		iavf_map_vector_to_txq(adapter, vidx, ridx);

		/* In the case where we have more queues than vectors, continue
		 * round-robin on vectors until all queues are mapped.
		 */
		if (++vidx >= q_vectors)
			vidx = 0;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}
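/* Worked example of the round-robin fallback above: with 8 queue pairs but
 * only 4 queue vectors, ridx 0..7 map to vidx 0,1,2,3,0,1,2,3 -- i.e. each
 * vector ends up servicing two Tx/Rx ring pairs.
 */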
/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct iavf_q_vector *q_vector =
		container_of(notify, struct iavf_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}
/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}
/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
	unsigned int vector, q_vectors;
	unsigned int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num, err;
	int cpu;

	iavf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-rx-%d", basename, rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name),
				 "iavf-%s-tx-%d", basename, tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  iavf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   iavf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread the IRQ affinity hints across online CPUs. Note that
		 * get_cpu_mask returns a mask with a permanent lifetime so
		 * it's safe to use as a hint for irq_set_affinity_hint.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}
/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &iavf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}
/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}
/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}
/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}
/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
	unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
	struct iavf_hw *hw = &adapter->hw;
	int i;

	/* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
	if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
		struct net_device *netdev = adapter->netdev;

		/* For jumbo frames on systems with 4K pages we have to use
		 * an order 1 page, so we might as well increase the size
		 * of our Rx buffer to make better use of the available space
		 */
		rx_buf_len = IAVF_RXBUFFER_3072;

		/* We use a 1536 buffer size for configurations with
		 * standard Ethernet mtu. On x86 this gives us enough room
		 * for shared info and 192 bytes of padding.
		 */
		if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
		    (netdev->mtu <= ETH_DATA_LEN))
			rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
	}
#endif

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = rx_buf_len;

		if (adapter->flags & IAVF_FLAG_LEGACY_RX)
			clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
		else
			set_ring_build_skb_enabled(&adapter->rx_rings[i]);
	}
}
/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}
/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f = NULL;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		list_add_tail(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	spin_unlock_bh(&adapter->mac_vlan_list_lock);
	return f;
}
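/* GFP_ATOMIC is required for the allocation above because it happens while
 * mac_vlan_list_lock is held with bottom halves disabled; a sleeping
 * GFP_KERNEL allocation is not permitted in that context.
 */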
/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
	struct iavf_vlan_filter *f;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (iavf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}
/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		iavf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}
/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
				  const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}
/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
					const u8 *macaddr)
{
	struct iavf_mac_filter *f;

	if (!macaddr)
		return NULL;

	f = iavf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return f;

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	} else {
		f->remove = false;
	}

	return f;
}
/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	f = iavf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = iavf_add_filter(adapter, addr->sa_data);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (f)
		ether_addr_copy(hw->mac.addr, addr->sa_data);

	return (f == NULL) ? -ENOMEM : 0;
}
/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (iavf_add_filter(adapter, addr))
		return 0;
	else
		return -ENOMEM;
}
/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_mac_filter *f;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	f = iavf_find_filter(adapter, addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
	}
	return 0;
}
/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	spin_lock_bh(&adapter->mac_vlan_list_lock);
	__dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	__dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & IAVF_FLAG_PROMISC_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & IAVF_FLAG_PROMISC_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}
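/* .ndo_set_rx_mode is called by the core with the netdev address list lock
 * held in BH context, so no virtchnl message can be sent from here
 * directly; the function only records the desired state in aq_required and
 * leaves it to the watchdog task to issue the actual PF requests later.
 */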
/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}
/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
	int q_idx;
	struct iavf_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}
/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	iavf_set_rx_mode(netdev);

	iavf_configure_tx(adapter);
	iavf_configure_rx(adapter);
	adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct iavf_ring *ring = &adapter->rx_rings[i];

		iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
	}
}
/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
	adapter->state = __IAVF_RUNNING;
	clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_napi_enable_all(adapter);

	adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct iavf_vlan_filter *vlf;
	struct iavf_mac_filter *f;
	struct iavf_cloud_filter *cf;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	iavf_napi_disable_all(adapter);
	iavf_irq_disable(adapter);

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* clear the sync flag on all filters */
	__dev_uc_unsync(adapter->netdev, NULL);
	__dev_mc_unsync(adapter->netdev, NULL);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}

	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* remove all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
		cf->del = true;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __IAVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
		adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
	}

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}
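/* pci_enable_msix_range() returns the number of vectors actually granted
 * (anywhere between vector_threshold and vectors) or a negative errno, so
 * storing the positive return value in num_msix_vectors is all that is
 * needed to adapt to a reduced allocation.
 */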
/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	adapter->num_active_queues = 0;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}
/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time. The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
	int i, num_active_queues;

	/* If we're in reset reallocating queues we don't actually know yet for
	 * certain the PF gave us the number of queues we asked for but we'll
	 * assume it did. Once basic reset is finished we'll confirm once we
	 * start negotiating config with PF.
	 */
	if (adapter->num_req_queues)
		num_active_queues = adapter->num_req_queues;
	else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
		 adapter->num_tc)
		num_active_queues = adapter->ch_config.total_qps;
	else
		num_active_queues = min_t(int,
					  adapter->vsi_res->num_queue_pairs,
					  (int)(num_online_cpus()));

	adapter->tx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(num_active_queues,
				    sizeof(struct iavf_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < num_active_queues; i++) {
		struct iavf_ring *tx_ring;
		struct iavf_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->itr_setting = IAVF_ITR_TX_DEF;
		if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->itr_setting = IAVF_ITR_RX_DEF;
	}

	adapter->num_active_queues = num_active_queues;

	return 0;

err_out:
	iavf_free_queues(adapter);
	return -ENOMEM;
}
/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really doesn't do
	 * us much good if we have more vectors than CPUs. However, we already
	 * limit the total number of queues by the number of CPUs so we do not
	 * need any further limiting here.
	 */
	v_budget = min_t(int, pairs + NONQ_VECS,
			 (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);

	return err;
}
/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
	struct iavf_aqc_get_set_rss_key_data *rss_key =
		(struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct iavf_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				  adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			iavf_stat_str(hw, ret),
			iavf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}
/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

	iavf_flush(hw);

	return 0;
}
/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
					IAVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return iavf_config_rss_aq(adapter);
	} else {
		return iavf_config_rss_reg(adapter);
	}
}
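/* Three RSS programming paths exist, selected by the capabilities the PF
 * advertised: RSS_PF defers key/LUT programming to the PF via virtchnl,
 * RSS_AQ sends admin-queue commands straight to firmware, and the default
 * path writes the VFQF_HKEY/HLUT registers directly.
 */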
/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}
/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_cap_flags &
		    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = IAVF_DEFAULT_RSS_HENA;

		wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	iavf_fill_rss_lut(adapter);
	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = iavf_config_rss(adapter);

	return ret;
}
/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct iavf_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		q_vector->reg_idx = q_idx;
		cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       iavf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}
/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}
/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}
/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
	int err;

	err = iavf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = iavf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	/* If we've made it so far while ADq flag being ON, then we haven't
	 * bailed out anywhere in middle. And ADq isn't just enabled but actual
	 * resources have been allocated in the reset path.
	 * Now we can truly claim that ADq is enabled.
	 */
	if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc)
		dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
			 adapter->num_tc);

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_q_vectors:
	iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
	iavf_free_queues(adapter);
err_alloc_queues:
	return err;
}
/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}
/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev))
		iavf_free_traffic_irqs(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);
	iavf_free_queues(adapter);

	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err;

	netif_tx_stop_all_queues(netdev);

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err;

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

	iavf_map_rings_to_vectors(adapter);

	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		err = iavf_init_rss(adapter);
err:
	return err;
}
/**
 * iavf_process_aq_command - process aq_required flags
 * and sends aq command
 * @adapter: pointer to iavf adapter structure
 *
 * Returns 0 on success
 * Returns error code if no command was sent
 * or error code if the command failed.
 **/
static int iavf_process_aq_command(struct iavf_adapter *adapter)
{
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
		return iavf_send_vf_config_msg(adapter);
	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
		iavf_disable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
		iavf_map_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
		iavf_add_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
		iavf_add_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
		iavf_del_ether_addrs(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
		iavf_del_vlans(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
		iavf_enable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
		iavf_disable_vlan_stripping(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
		iavf_configure_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
		iavf_enable_queues(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		iavf_init_rss(adapter);
		adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
		iavf_get_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
		iavf_set_hena(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
		iavf_set_rss_key(adapter);
		return 0;
	}
	if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
		iavf_set_rss_lut(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
		iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
				     FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
		iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
		return 0;
	}

	if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		iavf_set_promiscuous(adapter, 0);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
		iavf_enable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
		iavf_disable_channels(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
		iavf_add_cloud_filter(adapter);
		return 0;
	}

	if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
		iavf_del_cloud_filter(adapter);
		return 0;
	}

	return -EAGAIN;
}
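/* Only one aq_required flag is serviced per invocation, which keeps a
 * single virtchnl operation in flight at a time; the watchdog task calls
 * this repeatedly (rescheduling every 20 ms while aq_required is nonzero)
 * until the backlog is drained and -EAGAIN is returned.
 */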
/**
 * iavf_startup - first step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_STARTUP driver state.
 * When success the state is changed to __IAVF_INIT_VERSION_CHECK
 * when fails it returns -EAGAIN
 **/
static int iavf_startup(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	WARN_ON(adapter->state != __IAVF_STARTUP);

	/* driver loaded, probe complete */
	adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	err = iavf_set_mac_type(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
		goto err;
	}

	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
			 err);
		goto err;
	}
	hw->aq.num_arq_entries = IAVF_AQ_LEN;
	hw->aq.num_asq_entries = IAVF_AQ_LEN;
	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;

	err = iavf_init_adminq(hw);
	if (err) {
		dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
		goto err;
	}
	err = iavf_send_api_ver(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
		iavf_shutdown_adminq(hw);
		goto err;
	}
	adapter->state = __IAVF_INIT_VERSION_CHECK;
err:
	return err;
}
/**
 * iavf_init_version_check - second step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_VERSION_CHECK driver state.
 * When success the state is changed to __IAVF_INIT_GET_RESOURCES
 * when fails it returns -EAGAIN
 **/
static int iavf_init_version_check(struct iavf_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = -EAGAIN;

	WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);

	if (!iavf_asq_done(hw)) {
		dev_err(&pdev->dev, "Admin queue command never completed\n");
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		goto err;
	}

	/* aq msg sent, awaiting reply */
	err = iavf_verify_api_ver(adapter);
	if (err) {
		if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
			err = iavf_send_api_ver(adapter);
		else
			dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
				adapter->pf_version.major,
				adapter->pf_version.minor,
				VIRTCHNL_VERSION_MAJOR,
				VIRTCHNL_VERSION_MINOR);
		goto err;
	}
	err = iavf_send_vf_config_msg(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to send config request (%d)\n",
			err);
		goto err;
	}
	adapter->state = __IAVF_INIT_GET_RESOURCES;

err:
	return err;
}
/**
 * iavf_init_get_resources - third step of driver startup
 * @adapter: board private structure
 *
 * Function process __IAVF_INIT_GET_RESOURCES driver state and
 * finishes driver initialization procedure.
 * When success the state is changed to __IAVF_DOWN
 * when fails it returns -EAGAIN
 **/
static int iavf_init_get_resources(struct iavf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct iavf_hw *hw = &adapter->hw;
	int err = 0, bufsz;

	WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
	/* aq msg sent, awaiting reply */
	if (!adapter->vf_res) {
		bufsz = sizeof(struct virtchnl_vf_resource) +
			(IAVF_MAX_VF_VSI *
			 sizeof(struct virtchnl_vsi_resource));
		adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
		if (!adapter->vf_res)
			goto err;
	}
	err = iavf_get_vf_config(adapter);
	if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
		err = iavf_send_vf_config_msg(adapter);
		goto err;
	} else if (err == IAVF_ERR_PARAM) {
		/* We only get ERR_PARAM if the device is in a very bad
		 * state or if we've been disabled for previous bad
		 * behavior. Either way, we're done now.
		 */
		iavf_shutdown_adminq(hw);
		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
		return 0;
	}
	if (err) {
		dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
		goto err_alloc;
	}

	if (iavf_process_config(adapter))
		goto err_alloc;
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &iavf_netdev_ops;
	iavf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	adapter->tx_desc_count = IAVF_DEFAULT_TXD;
	adapter->rx_desc_count = IAVF_DEFAULT_RXD;
	err = iavf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	iavf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_cap_flags &
	    VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;

	err = iavf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* set the semaphore to prevent any callbacks after device registration
	 * up to time when state of driver will be set to __IAVF_DOWN
	 */
	rtnl_lock();
	if (!adapter->netdev_registered) {
		err = register_netdevice(netdev);
		if (err) {
			rtnl_unlock();
			goto err_register;
		}
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_add_device(adapter);
		if (err)
			dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
				 err);
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	adapter->state = __IAVF_DOWN;
	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	rtnl_unlock();

	iavf_misc_irq_enable(adapter);
	wake_up(&adapter->down_waitqueue);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut)
		goto err_mem;
	if (RSS_AQ(adapter))
		adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
	else
		iavf_init_rss(adapter);

	return err;
err_mem:
	iavf_free_rss(adapter);
err_register:
	iavf_free_misc_irq(adapter);
err_sw_init:
	iavf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	return err;
}
/**
 * iavf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void iavf_watchdog_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    watchdog_task.work);
	struct iavf_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		adapter->state = __IAVF_COMM_FAILED;

	switch (adapter->state) {
	case __IAVF_COMM_FAILED:
		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
		    reg_val == VIRTCHNL_VFR_COMPLETED) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev,
				"Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __IAVF_STARTUP;
			adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
			queue_delayed_work(iavf_wq, &adapter->init_task, 10);
			clear_bit(__IAVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		clear_bit(__IAVF_IN_CRITICAL_TASK,
			  &adapter->crit_section);
		queue_delayed_work(iavf_wq,
				   &adapter->watchdog_task,
				   msecs_to_jiffies(10));
		return;
	case __IAVF_RESETTING:
		clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
		return;
	case __IAVF_DOWN:
	case __IAVF_DOWN_PENDING:
	case __IAVF_TESTING:
	case __IAVF_RUNNING:
		if (adapter->current_op) {
			if (!iavf_asq_done(hw)) {
				dev_dbg(&adapter->pdev->dev,
					"Admin queue timeout\n");
				iavf_send_api_ver(adapter);
			}
		} else {
			if (!iavf_process_aq_command(adapter) &&
			    adapter->state == __IAVF_RUNNING)
				iavf_request_stats(adapter);
		}
		break;
	case __IAVF_REMOVE:
		clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
		return;
	default:
		goto restart_watchdog;
	}

	/* check for hw reset */
	reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
	if (!reg_val) {
		adapter->state = __IAVF_RESETTING;
		adapter->flags |= IAVF_FLAG_RESET_PENDING;
		adapter->aq_required = 0;
		adapter->current_op = VIRTCHNL_OP_UNKNOWN;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		queue_work(iavf_wq, &adapter->reset_task);
		goto watchdog_done;
	}

	schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
watchdog_done:
	if (adapter->state == __IAVF_RUNNING ||
	    adapter->state == __IAVF_COMM_FAILED)
		iavf_detect_recover_hung(&adapter->vsi);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->aq_required)
		queue_delayed_work(iavf_wq, &adapter->watchdog_task,
				   msecs_to_jiffies(20));
	else
		queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
	queue_work(iavf_wq, &adapter->adminq_task);
}
static void iavf_disable_vf(struct iavf_adapter *adapter)
{
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *fv, *fvtmp;
	struct iavf_cloud_filter *cf, *cftmp;

	adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;

	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	if (adapter->state == __IAVF_RUNNING) {
		set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
		iavf_irq_disable(adapter);
		iavf_free_traffic_irqs(adapter);
		iavf_free_all_tx_resources(adapter);
		iavf_free_all_rx_resources(adapter);
	}

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete all of the filters */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_queues(adapter);
	iavf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
	iavf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
	adapter->state = __IAVF_DOWN;
	wake_up(&adapter->down_waitqueue);
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
#define IAVF_RESET_WAIT_MS 10
#define IAVF_RESET_WAIT_COUNT 500
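/* Worst-case budget for the reset-completion poll below:
 * IAVF_RESET_WAIT_COUNT iterations of IAVF_RESET_WAIT_MS each,
 * i.e. 500 * 10 ms = 5 seconds before the VF is declared dead.
 */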
/**
 * iavf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
static void iavf_reset_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    reset_task);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_vlan_filter *vlf;
	struct iavf_cloud_filter *cf;
	u32 reg_val;
	int i = 0, err;
	bool running;

	/* When device is being removed it doesn't make sense to run the reset
	 * task, just return in such a case.
	 */
	if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
		return;

	while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);
	if (CLIENT_ENABLED(adapter)) {
		adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
				    IAVF_FLAG_CLIENT_NEEDS_CLOSE |
				    IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
				    IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
		cancel_delayed_work_sync(&adapter->client_task);
		iavf_notify_client_close(&adapter->vsi, true);
	}
	iavf_misc_irq_disable(adapter);
	if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		iavf_shutdown_adminq(hw);
		iavf_init_adminq(hw);
		iavf_request_reset(adapter);
	}
	adapter->flags |= IAVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
			  IAVF_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < IAVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(IAVF_RESET_WAIT_MS);

		reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
			  IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == VIRTCHNL_VFR_VFACTIVE)
			break;
	}

	pci_set_master(adapter->pdev);

	if (i == IAVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		iavf_disable_vf(adapter);
		clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	/* We don't use netif_running() because it may be true prior to
	 * ndo_open() returning, so we can't assume it means all our open
	 * tasks have finished, since we're not holding the rtnl_lock here.
	 */
	running = ((adapter->state == __IAVF_RUNNING) ||
		   (adapter->state == __IAVF_RESETTING));

	if (running) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		iavf_napi_disable_all(adapter);
	}
	iavf_irq_disable(adapter);

	adapter->state = __IAVF_RESETTING;
	adapter->flags &= ~IAVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	iavf_free_all_rx_resources(adapter);
	iavf_free_all_tx_resources(adapter);

	adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
	/* kill and reinit the admin queue */
	iavf_shutdown_adminq(hw);
	adapter->current_op = VIRTCHNL_OP_UNKNOWN;
	err = iavf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);
	adapter->aq_required = 0;

	if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
		err = iavf_reinit_interrupt_scheme(adapter);
		if (err)
			goto reset_err;
	}

	adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	/* Delete filter for the current MAC address, it could have
	 * been changed by the PF via administratively set MAC.
	 * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
			list_del(&f->list);
			kfree(f);
		}
	}
	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	/* check if TCs are running and re-add all cloud filters */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
	    adapter->num_tc) {
		list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
			cf->add = true;
		}
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	iavf_misc_irq_enable(adapter);

	mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);

	/* We were running when the reset started, so we need to restore some
	 * state here.
	 */
	if (running) {
		/* allocate transmit descriptors */
		err = iavf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = iavf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
			err = iavf_request_traffic_irqs(adapter, netdev->name);
			if (err)
				goto reset_err;

			adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
		}

		iavf_configure(adapter);

		iavf_up_complete(adapter);

		iavf_irq_enable(adapter, true);
	} else {
		adapter->state = __IAVF_DOWN;
		wake_up(&adapter->down_waitqueue);
	}
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return;
reset_err:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	iavf_close(netdev);
}
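/* Illustrative sketch (not part of the driver source): the pattern used
 * elsewhere in this file, e.g. in iavf_change_mtu() below, to request that
 * iavf_reset_task() runs. The wrapper function name is hypothetical.
 */
#if 0
static void example_request_reset(struct iavf_adapter *adapter)
{
	/* record why we are resetting, then hand off to iavf_reset_task() */
	adapter->flags |= IAVF_FLAG_RESET_NEEDED;
	queue_work(iavf_wq, &adapter->reset_task);
}
#endif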
/**
 * iavf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to work_struct containing our data
 **/
static void iavf_adminq_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, adminq_task);
	struct iavf_hw *hw = &adapter->hw;
	struct iavf_arq_event_info event;
	enum virtchnl_ops v_op;
	enum iavf_status ret, v_ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	do {
		ret = iavf_clean_arq_element(hw, &event, &pending);
		v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
		v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);

		if (ret || !v_op)
			break; /* No event to process or error cleaning ARQ */

		iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
					 event.msg_len);
		if (pending != 0)
			memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	if ((adapter->flags &
	     (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __IAVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* indicates device in reset */
		goto freedom;
	oldval = val;
	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	iavf_misc_irq_enable(adapter);
}
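/* Illustrative sketch (hypothetical helper): the read-clear-write sequence
 * iavf_adminq_task() applies to each ARQ/ASQ error bit, condensed to a
 * single flag. Register and mask names match those used above.
 */
#if 0
static void example_clear_arq_vf_error(struct iavf_adapter *adapter)
{
	struct iavf_hw *hw = &adapter->hw;
	u32 val = rd32(hw, hw->aq.arq.len);

	if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
		wr32(hw, hw->aq.arq.len, val);
	}
}
#endif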
/**
 * iavf_client_task - worker thread to perform client work
 * @work: pointer to work_struct containing our data
 *
 * This task handles client interactions. Because client calls can be
 * reentrant, we can't handle them in the watchdog.
 **/
static void iavf_client_task(struct work_struct *work)
{
	struct iavf_adapter *adapter =
		container_of(work, struct iavf_adapter, client_task.work);

	/* If we can't get the client bit, just give up. We'll be rescheduled
	 * later.
	 */
	if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
		return;

	if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
		iavf_client_subtask(adapter);
		adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
		iavf_notify_client_close(&adapter->vsi, false);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
		goto out;
	}
	if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
		iavf_notify_client_open(&adapter->vsi);
		adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
	}
out:
	clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
}
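/* Illustrative sketch: because client calls are reentrant, other contexts
 * only set a flag and (re)schedule this task rather than calling client ops
 * directly. The function name and delay below are hypothetical; the flag
 * and workqueue are the ones used above.
 */
#if 0
static void example_kick_client(struct iavf_adapter *adapter)
{
	adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
	queue_delayed_work(iavf_wq, &adapter->client_task,
			   msecs_to_jiffies(5));
}
#endif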
/**
 * iavf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->tx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->tx_rings[i].desc)
			iavf_free_tx_resources(&adapter->tx_rings[i]);
}
/**
 * iavf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->tx_rings[i].count = adapter->tx_desc_count;
		err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}
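/* Illustrative sketch: the caller-side cleanup contract described above.
 * On error some rings may already be populated, so callers such as
 * iavf_open() below free *all* rings rather than tracking which individual
 * allocations succeeded. Condensed, hypothetical caller fragment:
 */
#if 0
	err = iavf_setup_all_tx_resources(adapter);
	if (err)
		/* frees only the rings whose descriptors were allocated */
		iavf_free_all_tx_resources(adapter);
#endif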
/**
 * iavf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].count = adapter->rx_desc_count;
		err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}
/**
 * iavf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
{
	int i;

	if (!adapter->rx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->rx_rings[i].desc)
			iavf_free_rx_resources(&adapter->rx_rings[i]);
}
/**
 * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
 * @adapter: board private structure
 * @max_tx_rate: max Tx bw for a tc
 **/
static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
				      u64 max_tx_rate)
{
	int speed = 0, ret = 0;

	switch (adapter->link_speed) {
	case IAVF_LINK_SPEED_40GB:
		speed = 40000;
		break;
	case IAVF_LINK_SPEED_25GB:
		speed = 25000;
		break;
	case IAVF_LINK_SPEED_20GB:
		speed = 20000;
		break;
	case IAVF_LINK_SPEED_10GB:
		speed = 10000;
		break;
	case IAVF_LINK_SPEED_1GB:
		speed = 1000;
		break;
	case IAVF_LINK_SPEED_100MB:
		speed = 100;
		break;
	default:
		break;
	}

	if (max_tx_rate > speed) {
		dev_err(&adapter->pdev->dev,
			"Invalid tx rate specified\n");
		ret = -EINVAL;
	}

	return ret;
}
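/* Worked example (illustrative numbers): with adapter->link_speed equal to
 * IAVF_LINK_SPEED_10GB, the switch above selects speed = 10000 Mbps, so an
 * aggregate max_tx_rate of 12000 Mbps is rejected with -EINVAL while
 * 9500 Mbps passes.
 */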
/**
 * iavf_validate_channel_config - validate queue mapping info
 * @adapter: board private structure
 * @mqprio_qopt: queue parameters
 *
 * This function validates if the config provided by the user to
 * configure queue channels is valid or not. Returns 0 on a valid
 * config.
 **/
static int iavf_validate_ch_config(struct iavf_adapter *adapter,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 total_max_rate = 0;
	int i, num_qps = 0;
	u64 tx_rate = 0;
	int ret = 0;

	if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
	    mqprio_qopt->qopt.num_tc < 1)
		return -EINVAL;

	for (i = 0; i <= mqprio_qopt->qopt.num_tc - 1; i++) {
		if (!mqprio_qopt->qopt.count[i] ||
		    mqprio_qopt->qopt.offset[i] != num_qps)
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&adapter->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		/* convert to Mbps */
		tx_rate = div_u64(mqprio_qopt->max_rate[i],
				  IAVF_MBPS_DIVISOR);
		total_max_rate += tx_rate;
		num_qps += mqprio_qopt->qopt.count[i];
	}
	if (num_qps > IAVF_MAX_REQ_QUEUES)
		return -EINVAL;

	ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
	return ret;
}
/**
 * iavf_del_all_cloud_filters - delete all cloud filters
 * on the traffic classes
 * @adapter: board private structure
 **/
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
	struct iavf_cloud_filter *cf, *cftmp;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
				 list) {
		list_del(&cf->list);
		kfree(cf);
		adapter->num_cloud_filters--;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
}
/**
 * __iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type_data: tc offload data
 *
 * This function processes the config information provided by the
 * user to configure traffic classes/queue channels and packages the
 * information to request the PF to setup traffic classes.
 *
 * Returns 0 on success.
 **/
static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	u8 num_tc = 0, total_qps = 0;
	int ret = 0, netdev_tc = 0;
	u64 max_tx_rate;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	mode = mqprio_qopt->mode;

	/* delete queue_channel */
	if (!mqprio_qopt->qopt.hw) {
		if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
			/* reset the tc configuration */
			netdev_reset_tc(netdev);
			adapter->num_tc = 0;
			netif_tx_stop_all_queues(netdev);
			netif_tx_disable(netdev);
			iavf_del_all_cloud_filters(adapter);
			adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
			goto exit;
		} else {
			return -EINVAL;
		}
	}

	/* add queue channel */
	if (mode == TC_MQPRIO_MODE_CHANNEL) {
		if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
			dev_err(&adapter->pdev->dev, "ADq not supported\n");
			return -EOPNOTSUPP;
		}
		if (adapter->ch_config.state != __IAVF_TC_INVALID) {
			dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
			return -EINVAL;
		}

		ret = iavf_validate_ch_config(adapter, mqprio_qopt);
		if (ret)
			return ret;
		/* Return if same TC config is requested */
		if (adapter->num_tc == num_tc)
			return 0;
		adapter->num_tc = num_tc;

		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			if (i < num_tc) {
				adapter->ch_config.ch_info[i].count =
					mqprio_qopt->qopt.count[i];
				adapter->ch_config.ch_info[i].offset =
					mqprio_qopt->qopt.offset[i];
				total_qps += mqprio_qopt->qopt.count[i];
				max_tx_rate = mqprio_qopt->max_rate[i];
				/* convert to Mbps */
				max_tx_rate = div_u64(max_tx_rate,
						      IAVF_MBPS_DIVISOR);
				adapter->ch_config.ch_info[i].max_tx_rate =
					max_tx_rate;
			} else {
				adapter->ch_config.ch_info[i].count = 1;
				adapter->ch_config.ch_info[i].offset = 0;
			}
		}
		adapter->ch_config.total_qps = total_qps;
		netif_tx_stop_all_queues(netdev);
		netif_tx_disable(netdev);
		adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
		netdev_reset_tc(netdev);
		/* Report the tc mapping up the stack */
		netdev_set_num_tc(adapter->netdev, num_tc);
		for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
			u16 qcount = mqprio_qopt->qopt.count[i];
			u16 qoffset = mqprio_qopt->qopt.offset[i];

			if (i < num_tc)
				netdev_set_tc_queue(netdev, netdev_tc++, qcount,
						    qoffset);
		}
	}
exit:
	return ret;
}
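/* Illustrative usage (assumed interface name): a channel-mode mqprio qdisc
 * like the following reaches this handler via ndo_setup_tc with
 * TC_MQPRIO_MODE_CHANNEL, and deleting the root qdisc takes the
 * !mqprio_qopt->qopt.hw path above:
 *
 *   tc qdisc add dev <vf-netdev> root mqprio num_tc 2 \
 *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
 *   tc qdisc del dev <vf-netdev> root
 */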
/**
 * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
 * @adapter: board private structure
 * @f: pointer to struct flow_cls_offload
 * @filter: pointer to cloud filter structure
 */
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *f,
				 struct iavf_cloud_filter *filter)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u8 field_flags = 0;
	u16 addr_type = 0;
	u16 n_proto = 0;
	int i = 0;
	struct virtchnl_filter *vf = &filter->f;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
			dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_match_enc_keyid match;

		flow_rule_match_enc_keyid(rule, &match);
		if (match.mask->keyid != 0)
			field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		n_proto = n_proto_key & n_proto_mask;
		if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
			return -EINVAL;
		if (n_proto == ETH_P_IPV6) {
			/* specify flow type as TCP IPv6 */
			vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
		}

		if (match.key->ip_proto != IPPROTO_TCP) {
			dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
			return -EINVAL;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(match.mask->dst)) {
			if (is_broadcast_ether_addr(match.mask->dst)) {
				field_flags |= IAVF_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
					match.mask->dst);
				return IAVF_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(match.mask->src)) {
			if (is_broadcast_ether_addr(match.mask->src)) {
				field_flags |= IAVF_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
					match.mask->src);
				return IAVF_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(match.key->dst))
			if (is_valid_ether_addr(match.key->dst) ||
			    is_multicast_ether_addr(match.key->dst)) {
				/* set the mask if a valid dst_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.dst_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.dst_mac,
						match.key->dst);
			}

		if (!is_zero_ether_addr(match.key->src))
			if (is_valid_ether_addr(match.key->src) ||
			    is_multicast_ether_addr(match.key->src)) {
				/* set the mask if a valid src_mac address */
				for (i = 0; i < ETH_ALEN; i++)
					vf->mask.tcp_spec.src_mac[i] |= 0xff;
				ether_addr_copy(vf->data.tcp_spec.src_mac,
						match.key->src);
			}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id) {
			if (match.mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= IAVF_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
					match.mask->vlan_id);
				return IAVF_ERR_CONFIG;
			}
		}
		vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
		vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
					be32_to_cpu(match.mask->dst));
				return IAVF_ERR_CONFIG;
			}
		}

		if (match.mask->src) {
			if (match.mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
					be32_to_cpu(match.mask->src));
				return IAVF_ERR_CONFIG;
			}
		}

		if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
			dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
			return IAVF_ERR_CONFIG;
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.dst_ip[0] = match.key->dst;
		}
		if (match.key->src) {
			vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
			vf->data.tcp_spec.src_ip[0] = match.key->src;
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);

		/* validate mask, make sure it is not IPV6_ADDR_ANY */
		if (ipv6_addr_any(&match.mask->dst)) {
			dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
				IPV6_ADDR_ANY);
			return IAVF_ERR_CONFIG;
		}

		/* src and dest IPv6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&match.key->dst) ||
		    ipv6_addr_loopback(&match.key->src)) {
			dev_err(&adapter->pdev->dev,
				"ipv6 addr should not be loopback\n");
			return IAVF_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&match.mask->dst) ||
		    !ipv6_addr_any(&match.mask->src))
			field_flags |= IAVF_CLOUD_FIELD_IIP;

		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
		       sizeof(vf->data.tcp_spec.dst_ip));
		for (i = 0; i < 4; i++)
			vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
		memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
		       sizeof(vf->data.tcp_spec.src_ip));
	}
	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		if (match.mask->src) {
			if (match.mask->src == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
					be16_to_cpu(match.mask->src));
				return IAVF_ERR_CONFIG;
			}
		}

		if (match.mask->dst) {
			if (match.mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= IAVF_CLOUD_FIELD_IIP;
			} else {
				dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
					be16_to_cpu(match.mask->dst));
				return IAVF_ERR_CONFIG;
			}
		}
		if (match.key->dst) {
			vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.dst_port = match.key->dst;
		}

		if (match.key->src) {
			vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
			vf->data.tcp_spec.src_port = match.key->src;
		}
	}
	vf->field_flags = field_flags;

	return 0;
}
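/* Illustrative usage (assumed interface name and addresses): a tc flower
 * rule of the shape below dissects into the CONTROL/BASIC/IPV4_ADDRS/PORTS
 * keys handled above and becomes a VIRTCHNL_TCP_V4_FLOW cloud filter:
 *
 *   tc filter add dev <vf-netdev> protocol ip ingress \
 *      flower ip_proto tcp dst_ip 192.168.1.10/32 dst_port 80 \
 *      skip_sw hw_tc 1
 */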
/**
 * iavf_handle_tclass - Forward to a traffic class on the device
 * @adapter: board private structure
 * @tc: traffic class index on the device
 * @filter: pointer to cloud filter structure
 */
static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
			      struct iavf_cloud_filter *filter)
{
	if (tc == 0)
		return 0;
	if (tc < adapter->num_tc) {
		if (!filter->f.data.tcp_spec.dst_port) {
			dev_err(&adapter->pdev->dev,
				"Specify destination port to redirect to traffic class other than TC0\n");
			return -EINVAL;
		}
	}
	/* redirect to a traffic class on the same device */
	filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
	filter->f.action_meta = tc;
	return 0;
}
/**
 * iavf_configure_clsflower - Add tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct flow_cls_offload
 */
static int iavf_configure_clsflower(struct iavf_adapter *adapter,
				    struct flow_cls_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	struct iavf_cloud_filter *filter = NULL;
	int err = -EINVAL, count = 50;

	if (tc < 0) {
		dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		if (--count == 0)
			goto err;
		udelay(1);
	}

	filter->cookie = cls_flower->cookie;

	/* set the mask to all zeroes to begin with */
	memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
	/* start out with flow type and eth type IPv4 to begin with */
	filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
	err = iavf_parse_cls_flower(adapter, cls_flower, filter);
	if (err < 0)
		goto err;

	err = iavf_handle_tclass(adapter, tc, filter);
	if (err < 0)
		goto err;

	/* add filter to the list */
	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_add_tail(&filter->list, &adapter->cloud_filter_list);
	adapter->num_cloud_filters++;
	filter->add = true;
	adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
	spin_unlock_bh(&adapter->cloud_filter_list_lock);
err:
	if (err)
		kfree(filter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return err;
}
/* iavf_find_cf - Find the cloud filter in the list
 * @adapter: Board private structure
 * @cookie: filter specific cookie
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * cloud_filter_list_lock.
 */
static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
					      unsigned long *cookie)
{
	struct iavf_cloud_filter *filter = NULL;

	if (!cookie)
		return NULL;

	list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	}
	return NULL;
}
/**
 * iavf_delete_clsflower - Remove tc flower filters
 * @adapter: board private structure
 * @cls_flower: Pointer to struct flow_cls_offload
 */
static int iavf_delete_clsflower(struct iavf_adapter *adapter,
				 struct flow_cls_offload *cls_flower)
{
	struct iavf_cloud_filter *filter = NULL;
	int err = 0;

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	filter = iavf_find_cf(adapter, &cls_flower->cookie);
	if (filter) {
		filter->del = true;
		adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
	} else {
		err = -EINVAL;
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	return err;
}
/**
 * iavf_setup_tc_cls_flower - flower classifier offloads
 * @adapter: board private structure
 * @cls_flower: pointer to struct flow_cls_offload
 */
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
				    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return iavf_configure_clsflower(adapter, cls_flower);
	case FLOW_CLS_DESTROY:
		return iavf_delete_clsflower(adapter, cls_flower);
	case FLOW_CLS_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * iavf_setup_tc_block_cb - block callback for tc
 * @type: type of offload
 * @type_data: offload data
 * @cb_priv: board private structure
 *
 * This function is the block callback for traffic classes
 **/
static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct iavf_adapter *adapter = cb_priv;

	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return iavf_setup_tc_cls_flower(cb_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(iavf_block_cb_list);
/**
 * iavf_setup_tc - configure multiple traffic classes
 * @netdev: network interface device structure
 * @type: type of offload
 * @type_data: tc offload data
 *
 * This function is the callback to ndo_setup_tc in the
 * netdev_ops.
 *
 * Returns 0 on success
 **/
static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			 void *type_data)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return __iavf_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &iavf_block_cb_list,
						  iavf_setup_tc_block_cb,
						  adapter, adapter, true);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * iavf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog is started,
 * and the stack is notified that the interface is ready.
 **/
static int iavf_open(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int err;

	if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
		return -EIO;
	}

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (adapter->state != __IAVF_DOWN) {
		err = -EBUSY;
		goto err_unlock;
	}

	/* allocate transmit descriptors */
	err = iavf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = iavf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* clear any pending interrupts, may auto mask */
	err = iavf_request_traffic_irqs(adapter, netdev->name);
	if (err)
		goto err_req_irq;

	spin_lock_bh(&adapter->mac_vlan_list_lock);

	iavf_add_filter(adapter, adapter->hw.mac.addr);

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	iavf_configure(adapter);

	iavf_up_complete(adapter);

	iavf_irq_enable(adapter, true);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return 0;

err_req_irq:
	iavf_down(adapter);
	iavf_free_traffic_irqs(adapter);
err_setup_rx:
	iavf_free_all_rx_resources(adapter);
err_setup_tx:
	iavf_free_all_tx_resources(adapter);
err_unlock:
	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	return err;
}
/**
 * iavf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the drivers control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
 * are freed, along with all transmit and receive resources.
 **/
static int iavf_close(struct net_device *netdev)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int status;

	if (adapter->state <= __IAVF_DOWN_PENDING)
		return 0;

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
	if (CLIENT_ENABLED(adapter))
		adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;

	iavf_down(adapter);
	adapter->state = __IAVF_DOWN_PENDING;
	iavf_free_traffic_irqs(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * iavf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 *
	 * Also, we wait for state to transition to __IAVF_DOWN before
	 * returning. State change occurs in iavf_virtchnl_completion() after
	 * VF resources are released (which occurs after PF driver processes and
	 * responds to admin queue commands).
	 */

	status = wait_event_timeout(adapter->down_waitqueue,
				    adapter->state == __IAVF_DOWN,
				    msecs_to_jiffies(500));
	if (!status)
		netdev_warn(netdev, "Device resources not yet released\n");
	return 0;
}
/**
 * iavf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netdev->mtu = new_mtu;
	if (CLIENT_ENABLED(adapter)) {
		iavf_notify_client_l2_params(&adapter->vsi);
		adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
	}
	adapter->flags |= IAVF_FLAG_RESET_NEEDED;
	queue_work(iavf_wq, &adapter->reset_task);

	return 0;
}
/**
 * iavf_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int iavf_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	/* Don't allow changing VLAN_RX flag when adapter is not capable
	 * of VLAN offload
	 */
	if (!VLAN_ALLOWED(adapter)) {
		if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
			return -EINVAL;
	} else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			adapter->aq_required |=
				IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
		else
			adapter->aq_required |=
				IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
	}

	return 0;
}
/**
 * iavf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t iavf_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
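/* Worked example for the header-length checks above: MACLEN is encoded in
 * 2-byte words with a 6-bit field, so the largest expressible L2 header is
 * 63 * 2 = 126 bytes; "len & ~(63 * 2)" rejects anything longer (or odd).
 * A plain 14-byte Ethernet header passes since 14 & ~126 == 0. The dword
 * checks (127 * 4) follow the same pattern for the IP headers.
 */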
/**
 * iavf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
static netdev_features_t iavf_fix_features(struct net_device *netdev,
					   netdev_features_t features)
{
	struct iavf_adapter *adapter = netdev_priv(netdev);

	if (!(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
		features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_FILTER);

	return features;
}
static const struct net_device_ops iavf_netdev_ops = {
	.ndo_open		= iavf_open,
	.ndo_stop		= iavf_close,
	.ndo_start_xmit		= iavf_xmit_frame,
	.ndo_set_rx_mode	= iavf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= iavf_set_mac,
	.ndo_change_mtu		= iavf_change_mtu,
	.ndo_tx_timeout		= iavf_tx_timeout,
	.ndo_vlan_rx_add_vid	= iavf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= iavf_vlan_rx_kill_vid,
	.ndo_features_check	= iavf_features_check,
	.ndo_fix_features	= iavf_fix_features,
	.ndo_set_features	= iavf_set_features,
	.ndo_setup_tc		= iavf_setup_tc,
};
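/* Illustrative sketch: this ops table is attached to the netdev during
 * init (elsewhere in this file, not shown in this excerpt), in the usual
 * form:
 */
#if 0
	netdev->netdev_ops = &iavf_netdev_ops;
#endif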
/**
 * iavf_check_reset_complete - check that VF reset is complete
 * @hw: pointer to hw struct
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/
static int iavf_check_reset_complete(struct iavf_hw *hw)
{
	u32 rstat;
	int i;

	for (i = 0; i < 100; i++) {
		rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
			IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
		    (rstat == VIRTCHNL_VFR_COMPLETED))
			return 0;
		usleep_range(10, 20);
	}
	return -EBUSY;
}
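/* Illustrative sketch: the early-init path (iavf_startup(), not shown in
 * this excerpt) is the expected caller, bailing out while the PF still has
 * the VF in reset. Condensed, assumed shape of that caller fragment:
 */
#if 0
	err = iavf_check_reset_complete(hw);
	if (err) {
		dev_info(&pdev->dev,
			 "Device is still in reset (%d), retrying\n", err);
		goto err;
	}
#endif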
/**
 * iavf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
int iavf_process_config(struct iavf_adapter *adapter)
{
	struct virtchnl_vf_resource *vfres = adapter->vf_res;
	int i, num_req_queues = adapter->num_req_queues;
	struct net_device *netdev = adapter->netdev;
	struct iavf_vsi *vsi = &adapter->vsi;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	/* got VF config message back from PF, now we can parse it */
	for (i = 0; i < vfres->num_vsis; i++) {
		if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
			adapter->vsi_res = &vfres->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	if (num_req_queues &&
	    num_req_queues > adapter->vsi_res->num_queue_pairs) {
		/* Problem. The PF gave us fewer queues than what we had
		 * negotiated in our request. Need a reset to see if we can't
		 * get back to a working state.
		 */
		dev_err(&adapter->pdev->dev,
			"Requested %d queues, but PF only gave us %d.\n",
			num_req_queues,
			adapter->vsi_res->num_queue_pairs);
		adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
		adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
		iavf_schedule_reset(adapter);
		return -ENODEV;
	}
	adapter->num_req_queues = 0;

	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES	|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	/* advertise to stack only if offloads for encapsulated packets is
	 * supported
	 */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
		hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL	|
				   NETIF_F_GSO_GRE		|
				   NETIF_F_GSO_GRE_CSUM		|
				   NETIF_F_GSO_IPXIP4		|
				   NETIF_F_GSO_IPXIP6		|
				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
				   NETIF_F_GSO_PARTIAL		|
				   0;

		if (!(vfres->vf_cap_flags &
		      VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
			netdev->gso_partial_features |=
				NETIF_F_GSO_UDP_TUNNEL_CSUM;

		netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
		netdev->hw_enc_features |= hw_enc_features;
	}
	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * with, or dropping, features that are set when we registered.
	 */
	hw_features = hw_enc_features;

	/* Enable VLAN features if supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX);
	/* Enable cloud filter if ADQ is supported */
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
		hw_features |= NETIF_F_HW_TC;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features;

	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Do not turn on offloads when they are requested to be turned off.
	 * TSO needs minimum 576 bytes to work correctly.
	 */
	if (netdev->wanted_features) {
		if (!(netdev->wanted_features & NETIF_F_TSO) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO;
		if (!(netdev->wanted_features & NETIF_F_TSO6) ||
		    netdev->mtu < 576)
			netdev->features &= ~NETIF_F_TSO6;
		if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
			netdev->features &= ~NETIF_F_TSO_ECN;
		if (!(netdev->wanted_features & NETIF_F_GRO))
			netdev->features &= ~NETIF_F_GRO;
		if (!(netdev->wanted_features & NETIF_F_GSO))
			netdev->features &= ~NETIF_F_GSO;
	}

	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
	if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = vfres->rss_key_size;
		adapter->rss_lut_size = vfres->rss_lut_size;
	} else {
		adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}
/**
 * iavf_init_task - worker thread to perform delayed initialization
 * @work: pointer to work_struct containing our data
 *
 * This task completes the work that was begun in probe. Due to the nature
 * of VF-PF communications, we may need to wait tens of milliseconds to get
 * responses back from the PF. Rather than busy-wait in probe and bog down the
 * whole system, we'll do it in a task so we can sleep.
 * This task only runs during driver init. Once we've established
 * communications with the PF driver and set up our netdev, the watchdog
 * takes over.
 **/
static void iavf_init_task(struct work_struct *work)
{
	struct iavf_adapter *adapter = container_of(work,
						    struct iavf_adapter,
						    init_task.work);
	struct iavf_hw *hw = &adapter->hw;

	switch (adapter->state) {
	case __IAVF_STARTUP:
		if (iavf_startup(adapter) < 0)
			goto init_failed;
		break;
	case __IAVF_INIT_VERSION_CHECK:
		if (iavf_init_version_check(adapter) < 0)
			goto init_failed;
		break;
	case __IAVF_INIT_GET_RESOURCES:
		if (iavf_init_get_resources(adapter) < 0)
			goto init_failed;
		return;
	default:
		goto init_failed;
	}

	queue_delayed_work(iavf_wq, &adapter->init_task,
			   msecs_to_jiffies(30));
	return;
init_failed:
	if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
		dev_err(&adapter->pdev->dev,
			"Failed to communicate with PF; waiting before retry\n");
		adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
		iavf_shutdown_adminq(hw);
		adapter->state = __IAVF_STARTUP;
		queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
		return;
	}
	queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
}
/**
 * iavf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void iavf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		iavf_close(netdev);

	/* Prevent the watchdog from running. */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}
/**
 * iavf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in iavf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * iavf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct iavf_adapter *adapter = NULL;
	struct iavf_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, iavf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
				   IAVF_MAX_REQ_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	adapter->state = __IAVF_STARTUP;

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	spin_lock_init(&adapter->mac_vlan_list_lock);
	spin_lock_init(&adapter->cloud_filter_list_lock);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);
	INIT_LIST_HEAD(&adapter->cloud_filter_list);

	INIT_WORK(&adapter->reset_task, iavf_reset_task);
	INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
	INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
	INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
	queue_delayed_work(iavf_wq, &adapter->init_task,
			   msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	/* Setup the wait queue for indicating transition to down status */
	init_waitqueue_head(&adapter->down_waitqueue);

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
#ifdef CONFIG_PM
/**
 * iavf_suspend - Power management suspend routine
 * @pdev: PCI device information struct
 * @state: unused
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int iavf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	int retval = 0;

	netif_device_detach(netdev);

	while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	if (netif_running(netdev)) {
		rtnl_lock();
		iavf_down(adapter);
		rtnl_unlock();
	}
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);

	clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_disable_device(pdev);

	return 0;
}
/**
 * iavf_resume - Power management resume routine
 * @pdev: PCI device information struct
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int iavf_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	u32 err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = iavf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = iavf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	queue_work(iavf_wq, &adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
/**
 * iavf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * iavf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void iavf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct iavf_adapter *adapter = netdev_priv(netdev);
	struct iavf_vlan_filter *vlf, *vlftmp;
	struct iavf_mac_filter *f, *ftmp;
	struct iavf_cloud_filter *cf, *cftmp;
	struct iavf_hw *hw = &adapter->hw;
	int err;

	/* Indicate we are in remove and not to run reset_task */
	set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);
	cancel_delayed_work_sync(&adapter->client_task);
	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}
	if (CLIENT_ALLOWED(adapter)) {
		err = iavf_lan_del_device(adapter);
		if (err)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 err);
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __IAVF_REMOVE;
	adapter->aq_required = 0;
	adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
	iavf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!iavf_asq_done(hw)) {
		iavf_request_reset(adapter);
		msleep(50);
	}
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_misc_irq_disable(adapter);
	iavf_free_misc_irq(adapter);
	iavf_reset_interrupt_capability(adapter);
	iavf_free_q_vectors(adapter);

	cancel_delayed_work_sync(&adapter->watchdog_task);

	cancel_work_sync(&adapter->adminq_task);

	iavf_free_rss(adapter);

	if (hw->aq.asq.count)
		iavf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	iavf_free_all_tx_resources(adapter);
	iavf_free_all_rx_resources(adapter);
	iavf_free_queues(adapter);
	kfree(adapter->vf_res);
	spin_lock_bh(&adapter->mac_vlan_list_lock);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
	list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
				 list) {
		list_del(&vlf->list);
		kfree(vlf);
	}

	spin_unlock_bh(&adapter->mac_vlan_list_lock);

	spin_lock_bh(&adapter->cloud_filter_list_lock);
	list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
		list_del(&cf->list);
		kfree(cf);
	}
	spin_unlock_bh(&adapter->cloud_filter_list_lock);

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
static struct pci_driver iavf_driver = {
	.name     = iavf_driver_name,
	.id_table = iavf_pci_tbl,
	.probe    = iavf_probe,
	.remove   = iavf_remove,
#ifdef CONFIG_PM
	.suspend  = iavf_suspend,
	.resume   = iavf_resume,
#endif
	.shutdown = iavf_shutdown,
};
/**
 * iavf_init_module - Driver Registration Routine
 *
 * iavf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init iavf_init_module(void)
{
	int ret;

	pr_info("iavf: %s - version %s\n", iavf_driver_string,
		iavf_driver_version);

	pr_info("%s\n", iavf_copyright);

	iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  iavf_driver_name);
	if (!iavf_wq) {
		pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&iavf_driver);
	return ret;
}

module_init(iavf_init_module);
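/* Illustrative usage: loading the module (e.g. "modprobe iavf") runs
 * iavf_init_module(), which prints the banner above to the kernel log and
 * registers iavf_driver so iavf_probe() runs for each matching VF device.
 */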
/**
 * iavf_exit_module - Driver Exit Cleanup Routine
 *
 * iavf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit iavf_exit_module(void)
{
	pci_unregister_driver(&iavf_driver);
	destroy_workqueue(iavf_wq);
}

module_exit(iavf_exit_module);