/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#include "i40evf.h"
#include "i40e_prototype.h"

static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);

char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
	"Intel(R) 40-10 Gigabit Virtual Function Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 27
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD) \
	     DRV_KERN
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
	"Copyright (c) 2013 - 2015 Intel Corporation.";

/* i40evf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40evf_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl);

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40evf_wq;

/**
 * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
i40e_status i40evf_allocate_dma_mem_d(struct i40e_hw *hw,
				      struct i40e_dma_mem *mem,
				      u64 size, u32 alignment)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
				     (dma_addr_t *)&mem->pa, GFP_KERNEL);
	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
i40e_status i40evf_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)hw->back;

	if (!mem || !mem->va)
		return I40E_ERR_PARAM;
	dma_free_coherent(&adapter->pdev->dev, mem->size,
			  mem->va, (dma_addr_t)mem->pa);
	return 0;
}

/**
 * i40evf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
i40e_status i40evf_allocate_virt_mem_d(struct i40e_hw *hw,
				       struct i40e_virt_mem *mem, u32 size)
{
	if (!mem)
		return I40E_ERR_PARAM;

	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (mem->va)
		return 0;
	else
		return I40E_ERR_NO_MEMORY;
}

/**
 * i40evf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
i40e_status i40evf_free_virt_mem_d(struct i40e_hw *hw,
				   struct i40e_virt_mem *mem)
{
	if (!mem)
		return I40E_ERR_PARAM;

	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);

	return 0;
}

/**
 * i40evf_debug_d - OS dependent version of debug printing
 * @hw:  pointer to the HW structure
 * @mask: debug level mask
 * @fmt_str: printf-type format description
 **/
void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
{
	char buf[512];
	va_list argptr;

	if (!(mask & ((struct i40e_hw *)hw)->debug_mask))
		return;

	va_start(argptr, fmt_str);
	vsnprintf(buf, sizeof(buf), fmt_str, argptr);
	va_end(argptr);

	/* the debug string is already formatted with a newline */
	pr_info("%s", buf);
}

/**
 * i40evf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
 **/
void i40evf_schedule_reset(struct i40evf_adapter *adapter)
{
	if (!(adapter->flags &
	      (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
		schedule_work(&adapter->reset_task);
	}
}

/**
 * i40evf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void i40evf_tx_timeout(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	adapter->tx_timeout_count++;
	i40evf_schedule_reset(adapter);
}

/**
 * i40evf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_disable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	wr32(hw, I40E_VFINT_DYN_CTL01, 0);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);

	synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * i40evf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void i40evf_misc_irq_enable(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;

	wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK |
				       I40E_VFINT_DYN_CTL01_ITR_INDX_MASK);
	wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK);

	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void i40evf_irq_disable(struct i40evf_adapter *adapter)
{
	int i;
	struct i40e_hw *hw = &adapter->hw;

	if (!adapter->msix_entries)
		return;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
		synchronize_irq(adapter->msix_entries[i].vector);
	}
	/* read flush */
	rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_irq_enable_queues - Enable interrupt for specified queues
 * @adapter: board private structure
 * @mask: bitmap of queues to enable
 **/
void i40evf_irq_enable_queues(struct i40evf_adapter *adapter, u32 mask)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i - 1)) {
			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1),
			     I40E_VFINT_DYN_CTLN1_INTENA_MASK |
			     I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
			     I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK);
		}
	}
}

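/* Note on the mask layout used above: queue vector n (MSI-X vector n + 1,
 * since vector 0 is reserved for the admin queue) is controlled by bit n of
 * @mask. Passing ~0, as the callers in this file do, therefore enables every
 * allocated queue interrupt in a single call.
 */
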
/**
 * i40evf_fire_sw_int - Generate SW interrupt for specified vectors
 * @adapter: board private structure
 * @mask: bitmap of vectors to trigger
 **/
static void i40evf_fire_sw_int(struct i40evf_adapter *adapter, u32 mask)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;
	u32 dyn_ctl;

	if (mask & 1) {
		dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
		dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
			   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
			   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
		wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
	}
	for (i = 1; i < adapter->num_msix_vectors; i++) {
		if (mask & BIT(i)) {
			dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
			dyn_ctl |= I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
				   I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK |
				   I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
			wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), dyn_ctl);
		}
	}
}

/**
 * i40evf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
{
	struct i40e_hw *hw = &adapter->hw;

	i40evf_misc_irq_enable(adapter);
	i40evf_irq_enable_queues(adapter, ~0);

	if (flush)
		rd32(hw, I40E_VFGEN_RSTAT);
}

/**
 * i40evf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t i40evf_msix_aq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	u32 val;

	/* handle non-queue interrupts, these reads clear the registers */
	val = rd32(hw, I40E_VFINT_ICR01);
	val = rd32(hw, I40E_VFINT_ICR0_ENA1);

	val = rd32(hw, I40E_VFINT_DYN_CTL01) |
	      I40E_VFINT_DYN_CTL01_CLEARPBA_MASK;
	wr32(hw, I40E_VFINT_DYN_CTL01, val);

	/* schedule work on the private workqueue */
	schedule_work(&adapter->adminq_task);

	return IRQ_HANDLED;
}

/**
 * i40evf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * i40evf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
	struct i40e_hw *hw = &adapter->hw;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	rx_ring->vsi = &adapter->vsi;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
	q_vector->ring_mask |= BIT(r_idx);
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
}

/**
 * i40evf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{
	struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
	struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
	struct i40e_hw *hw = &adapter->hw;

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	tx_ring->vsi = &adapter->vsi;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->num_ringpairs++;
	wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
}

/**
 * i40evf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
 **/
static int i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
{
	int q_vectors;
	int v_start = 0;
	int rxr_idx = 0, txr_idx = 0;
	int rxr_remaining = adapter->num_active_queues;
	int txr_remaining = adapter->num_active_queues;
	int i, j;
	int rqpv, tqpv;
	int err = 0;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	/* The ideal configuration...
	 * We have enough vectors to map one per queue.
	 */
	if (q_vectors >= (rxr_remaining * 2)) {
		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
			i40evf_map_vector_to_rxq(adapter, v_start, rxr_idx);

		for (; txr_idx < txr_remaining; v_start++, txr_idx++)
			i40evf_map_vector_to_txq(adapter, v_start, txr_idx);
		goto out;
	}

	/* If we don't have enough vectors for a 1-to-1
	 * mapping, we'll have to group them so there are
	 * multiple queues per vector.
	 * Re-adjusting *qpv takes care of the remainder.
	 */
	for (i = v_start; i < q_vectors; i++) {
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
		for (j = 0; j < rqpv; j++) {
			i40evf_map_vector_to_rxq(adapter, i, rxr_idx);
			rxr_idx++;
			rxr_remaining--;
		}
	}
	for (i = v_start; i < q_vectors; i++) {
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
		for (j = 0; j < tqpv; j++) {
			i40evf_map_vector_to_txq(adapter, i, txr_idx);
			txr_idx++;
			txr_remaining--;
		}
	}

out:
	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;

	return err;
}

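/* Illustrative example of the constrained-budget path above: with 8 active
 * queue pairs but only 3 q_vectors, DIV_ROUND_UP spreads the Rx rings as
 * 3-3-2 (vector 0 gets rings 0-2, vector 1 gets rings 3-5, vector 2 gets
 * rings 6-7), and the Tx rings are grouped the same way by the second loop.
 */
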
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40evf_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
static void i40evf_netpoll(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &adapter->vsi.state))
		return;

	for (i = 0; i < q_vectors; i++)
		i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
}

#endif
/**
 * i40evf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40evf_irq_affinity_notify(struct irq_affinity_notify *notify,
				       const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	q_vector->affinity_mask = *mask;
}

/**
 * i40evf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40evf_irq_affinity_release(struct kref *ref) {}

/**
 * i40evf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
{
	int vector, err, q_vectors;
	int rx_int_idx = 0, tx_int_idx = 0;
	int irq_num;

	i40evf_irq_disable(adapter);
	/* Decrement for Other and TCP Timer vectors */
	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "i40evf-%s-%s-%d", basename,
				 "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "i40evf-%s-%s-%d", basename,
				 "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "i40evf-%s-%s-%d", basename,
				 "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  i40evf_msix_clean_rings,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&adapter->pdev->dev,
				 "Request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40evf_irq_affinity_notify;
		q_vector->affinity_notify.release =
						   i40evf_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* assign the mask for this irq */
		irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
	}

	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
	return err;
}

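/* With the naming scheme above, a fully loaded q_vector on a netdev named,
 * for example, "eth0" shows up in /proc/interrupts as "i40evf-eth0-TxRx-0",
 * "i40evf-eth0-TxRx-1", and so on, assuming the usual case where every
 * vector carries both a Tx and an Rx ring.
 */
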
/**
 * i40evf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;

	snprintf(adapter->misc_vector_name,
		 sizeof(adapter->misc_vector_name) - 1, "i40evf-%s:mbx",
		 dev_name(&adapter->pdev->dev));
	err = request_irq(adapter->msix_entries[0].vector,
			  &i40evf_msix_aq, 0,
			  adapter->misc_vector_name, netdev);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"request_irq for %s failed: %d\n",
			adapter->misc_vector_name, err);
		free_irq(adapter->msix_entries[0].vector, netdev);
	}
	return err;
}

/**
 * i40evf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
{
	int vector, irq_num, q_vectors;

	if (!adapter->msix_entries)
		return;

	q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (vector = 0; vector < q_vectors; vector++) {
		irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &adapter->q_vectors[vector]);
	}
}

/**
 * i40evf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void i40evf_free_misc_irq(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->msix_entries)
		return;

	free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * i40evf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void i40evf_configure_tx(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++)
		adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
}

/**
 * i40evf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
		adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
	}
}

/**
 * i40evf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL
 **/
static struct
i40evf_vlan_filter *i40evf_find_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;

	list_for_each_entry(f, &adapter->vlan_filter_list, list) {
		if (vlan == f->vlan)
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f = NULL;
	int count = 50;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			goto out;
	}

	f = i40evf_find_vlan(adapter, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto clearout;

		f->vlan = vlan;

		INIT_LIST_HEAD(&f->list);
		list_add(&f->list, &adapter->vlan_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	}

clearout:
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
out:
	return f;
}

/**
 * i40evf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
{
	struct i40evf_vlan_filter *f;
	int count = 50;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return;
	}

	f = i40evf_find_vlan(adapter, vlan);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
	}
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}

/**
 * i40evf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: protocol value (unused)
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (!VLAN_ALLOWED(adapter))
		return -EIO;
	if (i40evf_add_vlan(adapter, vid) == NULL)
		return -ENOMEM;
	return 0;
}

/**
 * i40evf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: protocol value (unused)
 * @vid: VLAN tag
 **/
static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
				   __always_unused __be16 proto, u16 vid)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (VLAN_ALLOWED(adapter)) {
		i40evf_del_vlan(adapter, vid);
		return 0;
	}
	return -EIO;
}

/**
 * i40evf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL
 **/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
				      u8 *macaddr)
{
	struct i40evf_mac_filter *f;

	if (!macaddr)
		return NULL;

	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		if (ether_addr_equal(macaddr, f->macaddr))
			return f;
	}
	return NULL;
}

/**
 * i40evf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
				     u8 *macaddr)
{
	struct i40evf_mac_filter *f;
	int count = 50;

	if (!macaddr)
		return NULL;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0)
			return NULL;
	}

	f = i40evf_find_filter(adapter, macaddr);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f) {
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			return NULL;
		}

		ether_addr_copy(f->macaddr, macaddr);

		list_add_tail(&f->list, &adapter->mac_filter_list);
		f->add = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	return f;
}

/**
 * i40evf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_set_mac(struct net_device *netdev, void *p)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_mac_filter *f;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
		return 0;

	if (adapter->flags & I40EVF_FLAG_ADDR_SET_BY_PF)
		return -EPERM;

	f = i40evf_find_filter(adapter, hw->mac.addr);
	if (f) {
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
	}

	f = i40evf_add_filter(adapter, addr->sa_data);
	if (f) {
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
	}

	return (f == NULL) ? -ENOMEM : 0;
}

/**
 * i40evf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40evf_set_rx_mode(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f, *ftmp;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;
	int count = 50;

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		i40evf_add_filter(adapter, uca->addr);
	}
	netdev_for_each_mc_addr(mca, netdev) {
		i40evf_add_filter(adapter, mca->addr);
	}

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section)) {
		udelay(1);
		if (--count == 0) {
			dev_err(&adapter->pdev->dev,
				"Failed to get lock in %s\n", __func__);
			return;
		}
	}
	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
			goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		f->remove = true;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;

bottom_of_search_loop:
		continue;
	}

	if (netdev->flags & IFF_PROMISC &&
	    !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
	else if (!(netdev->flags & IFF_PROMISC) &&
		 adapter->flags & I40EVF_FLAG_PROMISC_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;

	if (netdev->flags & IFF_ALLMULTI &&
	    !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
		adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
	else if (!(netdev->flags & IFF_ALLMULTI) &&
		 adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
		adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}

/**
 * i40evf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		struct napi_struct *napi;

		q_vector = &adapter->q_vectors[q_idx];
		napi = &q_vector->napi;
		napi_enable(napi);
	}
}

/**
 * i40evf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
{
	int q_idx;
	struct i40e_q_vector *q_vector;
	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

	for (q_idx = 0; q_idx < q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		napi_disable(&q_vector->napi);
	}
}

/**
 * i40evf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void i40evf_configure(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	i40evf_set_rx_mode(netdev);

	i40evf_configure_tx(adapter);
	i40evf_configure_rx(adapter);
	adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct i40e_ring *ring = &adapter->rx_rings[i];

		i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
	}
}

/**
 * i40evf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 **/
static void i40evf_up_complete(struct i40evf_adapter *adapter)
{
	adapter->state = __I40EVF_RUNNING;
	clear_bit(__I40E_DOWN, &adapter->vsi.state);

	i40evf_napi_enable_all(adapter);

	adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
	mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
}

/**
 * i40evf_down - Shutdown the connection processing
 * @adapter: board private structure
 **/
void i40evf_down(struct i40evf_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_mac_filter *f;

	if (adapter->state <= __I40EVF_DOWN_PENDING)
		return;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);
	adapter->link_up = false;
	i40evf_napi_disable_all(adapter);
	i40evf_irq_disable(adapter);

	/* remove all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->remove = true;
	}
	/* remove all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->remove = true;
	}
	if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
	    adapter->state != __I40EVF_RESETTING) {
		/* cancel any current operation */
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		/* Schedule operations to close down the HW. Don't wait
		 * here for this to complete. The watchdog is still running
		 * and it will take care of this.
		 */
		adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
		adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}

/**
 * i40evf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns 0 on success, negative on failure
 **/
static int
i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
{
	int err, vector_threshold;

	/* We'll want at least 3 (vector_threshold):
	 * 0) Other (Admin Queue and link, mostly)
	 * 1) TxQ[0] Cleanup
	 * 2) RxQ[0] Cleanup
	 */
	vector_threshold = MIN_MSIX_COUNT;

	/* The more we get, the more we will assign to Tx/Rx Cleanup
	 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
	 * Right now, we simply care about how many we'll get; we'll
	 * set them up later while requesting irq's.
	 */
	err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
				    vector_threshold, vectors);
	if (err < 0) {
		dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
		return err;
	}

	/* Adjust for only the vectors we'll use, which is minimum
	 * of max_msix_q_vectors + NONQ_VECS, or the number of
	 * vectors we were allocated.
	 */
	adapter->num_msix_vectors = err;
	return 0;
}

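/* pci_enable_msix_range() returns the number of vectors it actually
 * allocated (somewhere between vector_threshold and the requested count) or
 * a negative errno, which is why the success path above can store the return
 * value directly in num_msix_vectors.
 */
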
/**
 * i40evf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void i40evf_free_queues(struct i40evf_adapter *adapter)
{
	if (!adapter->vsi_res)
		return;
	kfree(adapter->tx_rings);
	adapter->tx_rings = NULL;
	kfree(adapter->rx_rings);
	adapter->rx_rings = NULL;
}

/**
 * i40evf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.  The polling_netdev array is
 * intended for Multiqueue, but should work fine with a single queue.
 **/
static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{
	int i;

	adapter->tx_rings = kcalloc(adapter->num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->tx_rings)
		goto err_out;
	adapter->rx_rings = kcalloc(adapter->num_active_queues,
				    sizeof(struct i40e_ring), GFP_KERNEL);
	if (!adapter->rx_rings)
		goto err_out;

	for (i = 0; i < adapter->num_active_queues; i++) {
		struct i40e_ring *tx_ring;
		struct i40e_ring *rx_ring;

		tx_ring = &adapter->tx_rings[i];

		tx_ring->queue_index = i;
		tx_ring->netdev = adapter->netdev;
		tx_ring->dev = &adapter->pdev->dev;
		tx_ring->count = adapter->tx_desc_count;
		tx_ring->tx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF);
		if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;

		rx_ring = &adapter->rx_rings[i];
		rx_ring->queue_index = i;
		rx_ring->netdev = adapter->netdev;
		rx_ring->dev = &adapter->pdev->dev;
		rx_ring->count = adapter->rx_desc_count;
		rx_ring->rx_itr_setting = (I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF);
	}

	return 0;

err_out:
	i40evf_free_queues(adapter);
	return -ENOMEM;
}

/**
 * i40evf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
{
	int vector, v_budget;
	int pairs = 0;
	int err = 0;

	if (!adapter->vsi_res) {
		err = -EIO;
		goto out;
	}
	pairs = adapter->num_active_queues;

	/* It's easy to be greedy for MSI-X vectors, but it really
	 * doesn't do us much good if we have a lot more vectors
	 * than CPU's. So let's be conservative and only ask for
	 * (roughly) twice the number of vectors as there are CPU's.
	 */
	v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
	v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);

	adapter->msix_entries = kcalloc(v_budget,
					sizeof(struct msix_entry), GFP_KERNEL);
	if (!adapter->msix_entries) {
		err = -ENOMEM;
		goto out;
	}

	for (vector = 0; vector < v_budget; vector++)
		adapter->msix_entries[vector].entry = vector;

	err = i40evf_acquire_msix_vectors(adapter, v_budget);

out:
	netif_set_real_num_rx_queues(adapter->netdev, pairs);
	netif_set_real_num_tx_queues(adapter->netdev, pairs);

	return err;
}

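/* Example of the budget math above (illustrative numbers): a VF with 4
 * active queue pairs on an 8-CPU host asks for min(4, 16) queue vectors plus
 * the non-queue (admin queue) vector counted by NONQ_VECS, i.e. 5 in total,
 * and that is then clamped to whatever max_vectors the PF advertised in
 * vf_res.
 */
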
/**
 * i40evf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
	struct i40e_aqc_get_set_rss_key_data *rss_key =
		(struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
	struct i40e_hw *hw = &adapter->hw;
	int ret = 0;

	if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
		/* bail because we already have a command pending */
		dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
			adapter->current_op);
		return -EBUSY;
	}

	ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
		return ret;
	}

	ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
				    adapter->rss_lut, adapter->rss_lut_size);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
			i40evf_stat_str(hw, ret),
			i40evf_aq_str(hw, hw->aq.asq_last_status));
	}

	return ret;
}

/**
 * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	u32 *dw;
	u16 i;

	dw = (u32 *)adapter->rss_key;
	for (i = 0; i <= adapter->rss_key_size / 4; i++)
		wr32(hw, I40E_VFQF_HKEY(i), dw[i]);

	dw = (u32 *)adapter->rss_lut;
	for (i = 0; i <= adapter->rss_lut_size / 4; i++)
		wr32(hw, I40E_VFQF_HLUT(i), dw[i]);

	i40e_flush(hw);

	return 0;
}

/**
 * i40evf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int i40evf_config_rss(struct i40evf_adapter *adapter)
{
	if (RSS_PF(adapter)) {
		adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
					I40EVF_FLAG_AQ_SET_RSS_KEY;
		return 0;
	} else if (RSS_AQ(adapter)) {
		return i40evf_config_rss_aq(adapter);
	} else {
		return i40evf_config_rss_reg(adapter);
	}
}

/**
 * i40evf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
 **/
static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
{
	u16 i;

	for (i = 0; i < adapter->rss_lut_size; i++)
		adapter->rss_lut[i] = i % adapter->num_active_queues;
}

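/* For example, with 4 active queues the LUT filled above becomes the
 * repeating pattern 0, 1, 2, 3, 0, 1, 2, 3, ... so RSS hash buckets are
 * spread evenly across all enabled queue pairs.
 */
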
/**
 * i40evf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_init_rss(struct i40evf_adapter *adapter)
{
	struct i40e_hw *hw = &adapter->hw;
	int ret;

	if (!RSS_PF(adapter)) {
		/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
		if (adapter->vf_res->vf_offload_flags &
		    I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
			adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
		else
			adapter->hena = I40E_DEFAULT_RSS_HENA;

		wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
		wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
	}

	i40evf_fill_rss_lut(adapter);

	netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
	ret = i40evf_config_rss(adapter);

	return ret;
}

/**
 * i40evf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
{
	int q_idx = 0, num_q_vectors;
	struct i40e_q_vector *q_vector;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
				     GFP_KERNEL);
	if (!adapter->q_vectors)
		return -ENOMEM;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		q_vector = &adapter->q_vectors[q_idx];
		q_vector->adapter = adapter;
		q_vector->vsi = &adapter->vsi;
		q_vector->v_idx = q_idx;
		netif_napi_add(adapter->netdev, &q_vector->napi,
			       i40evf_napi_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

/**
 * i40evf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
{
	int q_idx, num_q_vectors;
	int napi_vectors;

	if (!adapter->q_vectors)
		return;

	num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
	napi_vectors = adapter->num_active_queues;

	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];

		if (q_idx < napi_vectors)
			netif_napi_del(&q_vector->napi);
	}
	kfree(adapter->q_vectors);
	adapter->q_vectors = NULL;
}

/**
 * i40evf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
{
	if (!adapter->msix_entries)
		return;

	pci_disable_msix(adapter->pdev);
	kfree(adapter->msix_entries);
	adapter->msix_entries = NULL;
}

/**
 * i40evf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
{
	int err;

	err = i40evf_set_interrupt_capability(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to setup interrupt capabilities\n");
		goto err_set_interrupt;
	}

	err = i40evf_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	err = i40evf_alloc_queues(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Unable to allocate memory for queues\n");
		goto err_alloc_queues;
	}

	dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
		 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
		 adapter->num_active_queues);

	return 0;
err_alloc_queues:
	i40evf_free_q_vectors(adapter);
err_alloc_q_vectors:
	i40evf_reset_interrupt_capability(adapter);
err_set_interrupt:
	return err;
}

/**
 * i40evf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
	kfree(adapter->rss_key);
	adapter->rss_key = NULL;

	kfree(adapter->rss_lut);
	adapter->rss_lut = NULL;
}

/**
 * i40evf_watchdog_timer - Periodic call-back timer
 * @data: pointer to adapter disguised as unsigned long
 **/
static void i40evf_watchdog_timer(unsigned long data)
{
	struct i40evf_adapter *adapter = (struct i40evf_adapter *)data;

	schedule_work(&adapter->watchdog_task);
	/* timer will be rescheduled in watchdog task */
}

/**
 * i40evf_watchdog_task - Periodic call-back task
 * @work: pointer to work_struct
 **/
static void i40evf_watchdog_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      watchdog_task);
	struct i40e_hw *hw = &adapter->hw;
	u32 reg_val;

	if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
		goto restart_watchdog;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((reg_val == I40E_VFR_VFACTIVE) ||
		    (reg_val == I40E_VFR_COMPLETED)) {
			/* A chance for redemption! */
			dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
			adapter->state = __I40EVF_STARTUP;
			adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
			schedule_delayed_work(&adapter->init_task, 10);
			clear_bit(__I40EVF_IN_CRITICAL_TASK,
				  &adapter->crit_section);
			/* Don't reschedule the watchdog, since we've restarted
			 * the init task. When init_task contacts the PF and
			 * gets everything set up again, it'll restart the
			 * watchdog for us. Down, boy. Sit. Stay. Woof.
			 */
			return;
		}
		adapter->aq_required = 0;
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	if ((adapter->state < __I40EVF_DOWN) ||
	    (adapter->flags & I40EVF_FLAG_RESET_PENDING))
		goto watchdog_done;

	/* check for reset */
	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __I40EVF_RESETTING;
		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
		schedule_work(&adapter->reset_task);
		adapter->aq_required = 0;
		adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
		goto watchdog_done;
	}

	/* Process admin queue tasks. After init, everything gets done
	 * here so we don't race on the admin queue.
	 */
	if (adapter->current_op) {
		if (!i40evf_asq_done(hw)) {
			dev_dbg(&adapter->pdev->dev, "Admin queue timeout\n");
			i40evf_send_api_ver(adapter);
		}
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_CONFIG) {
		i40evf_send_vf_config_msg(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_QUEUES) {
		i40evf_disable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_MAP_VECTORS) {
		i40evf_map_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_MAC_FILTER) {
		i40evf_add_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_VLAN_FILTER) {
		i40evf_add_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_MAC_FILTER) {
		i40evf_del_ether_addrs(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_VLAN_FILTER) {
		i40evf_del_vlans(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_QUEUES) {
		i40evf_configure_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_QUEUES) {
		i40evf_enable_queues(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_CONFIGURE_RSS) {
		/* This message goes straight to the firmware, not the
		 * PF, so we don't have to set current_op as we will
		 * not get a response through the ARQ.
		 */
		i40evf_init_rss(adapter);
		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
		i40evf_get_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
		i40evf_set_hena(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
		i40evf_set_rss_key(adapter);
		goto watchdog_done;
	}
	if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
		i40evf_set_rss_lut(adapter);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
		i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
				       I40E_FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
		i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
		goto watchdog_done;
	}

	if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
	    (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
		i40evf_set_promiscuous(adapter, 0);
		goto watchdog_done;
	}

	if (adapter->state == __I40EVF_RUNNING)
		i40evf_request_stats(adapter);
watchdog_done:
	if (adapter->state == __I40EVF_RUNNING) {
		i40evf_irq_enable_queues(adapter, ~0);
		i40evf_fire_sw_int(adapter, 0xFF);
	} else {
		i40evf_fire_sw_int(adapter, 0x1);
	}

	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
restart_watchdog:
	if (adapter->state == __I40EVF_REMOVE)
		return;
	if (adapter->aq_required)
		mod_timer(&adapter->watchdog_timer,
			  jiffies + msecs_to_jiffies(20));
	else
		mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
	schedule_work(&adapter->adminq_task);
}

static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
	struct i40evf_mac_filter *f, *ftmp;
	struct i40evf_vlan_filter *fv, *fvtmp;

	adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;

	if (netif_running(adapter->netdev)) {
		set_bit(__I40E_DOWN, &adapter->vsi.state);
		netif_carrier_off(adapter->netdev);
		netif_tx_disable(adapter->netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
		i40evf_irq_disable(adapter);
		i40evf_free_traffic_irqs(adapter);
		i40evf_free_all_tx_resources(adapter);
		i40evf_free_all_rx_resources(adapter);
	}

	/* Delete all of the filters, both MAC and VLAN. */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
		list_del(&fv->list);
		kfree(fv);
	}

	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_queues(adapter);
	i40evf_free_q_vectors(adapter);
	kfree(adapter->vf_res);
	i40evf_shutdown_adminq(&adapter->hw);
	adapter->netdev->flags &= ~IFF_UP;
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
	adapter->state = __I40EVF_DOWN;
	dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}

#define I40EVF_RESET_WAIT_MS 10
#define I40EVF_RESET_WAIT_COUNT 500
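/* The second polling loop in i40evf_reset_task() below sleeps
 * I40EVF_RESET_WAIT_MS between reads of VFGEN_RSTAT, so the VF waits at most
 * roughly 500 * 10 ms = 5 seconds for the PF to finish the reset before
 * declaring the device dead and calling i40evf_disable_vf().
 */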
/**
 * i40evf_reset_task - Call-back task to handle hardware reset
 * @work: pointer to work_struct
 *
 * During reset we need to shut down and reinitialize the admin queue
 * before we can use it to communicate with the PF again. We also clear
 * and reinit the rings because that context is lost as well.
 **/
static void i40evf_reset_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      reset_task);
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct i40evf_vlan_filter *vlf;
	struct i40evf_mac_filter *f;
	u32 reg_val;
	int i = 0, err;

	while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
				&adapter->crit_section))
		usleep_range(500, 1000);

	i40evf_misc_irq_disable(adapter);
	if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) {
		adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED;
		/* Restart the AQ here. If we have been reset but didn't
		 * detect it, or if the PF had to reinit, our AQ will be hosed.
		 */
		i40evf_shutdown_adminq(hw);
		i40evf_init_adminq(hw);
		i40evf_request_reset(adapter);
	}
	adapter->flags |= I40EVF_FLAG_RESET_PENDING;

	/* poll until we see the reset actually happen */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		reg_val = rd32(hw, I40E_VF_ARQLEN1) &
			  I40E_VF_ARQLEN1_ARQENABLE_MASK;
		if (!reg_val)
			break;
		usleep_range(5000, 10000);
	}
	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_info(&adapter->pdev->dev, "Never saw reset\n");
		goto continue_reset; /* act like the reset happened */
	}

	/* wait until the reset is complete and the PF is responding to us */
	for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
		/* sleep first to make sure a minimum wait time is met */
		msleep(I40EVF_RESET_WAIT_MS);

		reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
			  I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if (reg_val == I40E_VFR_VFACTIVE)
			break;
	}

	pci_set_master(adapter->pdev);

	if (i == I40EVF_RESET_WAIT_COUNT) {
		dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
			reg_val);
		i40evf_disable_vf(adapter);
		return; /* Do not attempt to reinit. It's dead, Jim. */
	}

continue_reset:
	if (netif_running(adapter->netdev)) {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		adapter->link_up = false;
		i40evf_napi_disable_all(adapter);
	}
	i40evf_irq_disable(adapter);

	adapter->state = __I40EVF_RESETTING;
	adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;

	/* free the Tx/Rx rings and descriptors, might be better to just
	 * re-use them sometime in the future
	 */
	i40evf_free_all_rx_resources(adapter);
	i40evf_free_all_tx_resources(adapter);

	/* kill and reinit the admin queue */
	i40evf_shutdown_adminq(hw);
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
	err = i40evf_init_adminq(hw);
	if (err)
		dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
			 err);

	adapter->aq_required = I40EVF_FLAG_AQ_GET_CONFIG;
	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;

	/* re-add all MAC filters */
	list_for_each_entry(f, &adapter->mac_filter_list, list) {
		f->add = true;
	}
	/* re-add all VLAN filters */
	list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
		vlf->add = true;
	}
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
	adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
	/* Open RDMA Client again */
	adapter->aq_required |= I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
	clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
	i40evf_misc_irq_enable(adapter);

	mod_timer(&adapter->watchdog_timer, jiffies + 2);

	if (netif_running(adapter->netdev)) {
		/* allocate transmit descriptors */
		err = i40evf_setup_all_tx_resources(adapter);
		if (err)
			goto reset_err;

		/* allocate receive descriptors */
		err = i40evf_setup_all_rx_resources(adapter);
		if (err)
			goto reset_err;

		i40evf_configure(adapter);

		i40evf_up_complete(adapter);

		i40evf_irq_enable(adapter, true);
	} else {
		adapter->state = __I40EVF_DOWN;
	}

	return;
reset_err:
	dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
	i40evf_close(adapter->netdev);
}

/**
 * i40evf_adminq_task - worker thread to clean the admin queue
 * @work: pointer to work_struct containing our data
 **/
static void i40evf_adminq_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter =
		container_of(work, struct i40evf_adapter, adminq_task);
	struct i40e_hw *hw = &adapter->hw;
	struct i40e_arq_event_info event;
	struct i40e_virtchnl_msg *v_msg;
	i40e_status ret;
	u32 val, oldval;
	u16 pending;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
		goto out;

	event.buf_len = I40EVF_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		goto out;

	v_msg = (struct i40e_virtchnl_msg *)&event.desc;
	do {
		ret = i40evf_clean_arq_element(hw, &event, &pending);
		if (ret || !v_msg->v_opcode)
			break; /* No event to process or error cleaning ARQ */

		i40evf_virtchnl_completion(adapter, v_msg->v_opcode,
					   v_msg->v_retval, event.msg_buf,
					   event.msg_len);
		if (pending != 0)
			memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE);
	} while (pending);

	if ((adapter->flags &
	     (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) ||
	    adapter->state == __I40EVF_RESETTING)
		goto freedom;

	/* check for error indications */
	val = rd32(hw, hw->aq.arq.len);
	if (val == 0xdeadbeef) /* indicates device in reset */
		goto freedom;
	oldval = val;
	if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQVFE_MASK;
	}
	if (val & I40E_VF_ARQLEN1_ARQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK;
	}
	if (val & I40E_VF_ARQLEN1_ARQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.arq.len, val);

	val = rd32(hw, hw->aq.asq.len);
	oldval = val;
	if (val & I40E_VF_ATQLEN1_ATQVFE_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQVFE_MASK;
	}
	if (val & I40E_VF_ATQLEN1_ATQOVFL_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK;
	}
	if (val & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
		dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(hw, hw->aq.asq.len, val);

freedom:
	kfree(event.msg_buf);
out:
	/* re-enable Admin queue interrupt cause */
	i40evf_misc_irq_enable(adapter);
}

/**
 * i40evf_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
{
	int i;

	if (!adapter->tx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->tx_rings[i].desc)
			i40evf_free_tx_resources(&adapter->tx_rings[i]);
}

/**
 * i40evf_setup_all_tx_resources - allocate all queues Tx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->tx_rings[i].count = adapter->tx_desc_count;
		err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Tx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * i40evf_setup_all_rx_resources - allocate all queues Rx resources
 * @adapter: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
{
	int i, err = 0;

	for (i = 0; i < adapter->num_active_queues; i++) {
		adapter->rx_rings[i].count = adapter->rx_desc_count;
		err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
		if (!err)
			continue;
		dev_err(&adapter->pdev->dev,
			"Allocation for Rx Queue %u failed\n", i);
		break;
	}

	return err;
}

/**
 * i40evf_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
{
	int i;

	if (!adapter->rx_rings)
		return;

	for (i = 0; i < adapter->num_active_queues; i++)
		if (adapter->rx_rings[i].desc)
			i40evf_free_rx_resources(&adapter->rx_rings[i]);
}

/**
 * i40evf_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int i40evf_open(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int err;

	if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
		dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
		return -EIO;
	}

	if (adapter->state != __I40EVF_DOWN)
		return -EBUSY;

	/* allocate transmit descriptors */
	err = i40evf_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = i40evf_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	/* clear any pending interrupts, may auto mask */
	err = i40evf_request_traffic_irqs(adapter, netdev->name);
	if (err)
		goto err_req_irq;

	i40evf_add_filter(adapter, adapter->hw.mac.addr);
	i40evf_configure(adapter);

	i40evf_up_complete(adapter);

	i40evf_irq_enable(adapter, true);

	return 0;
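
	/* Error unwind: release whatever was set up above, in reverse order of
	 * allocation. The setup helpers may leave some rings populated on
	 * failure, and the free routines check each ring before touching it,
	 * so calling them on partially initialized state is safe.
	 */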
err_req_irq:
	i40evf_down(adapter);
	i40evf_free_traffic_irqs(adapter);
err_setup_rx:
	i40evf_free_all_rx_resources(adapter);
err_setup_tx:
	i40evf_free_all_tx_resources(adapter);

	return err;
}
/**
 * i40evf_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. All IRQs except vector 0 (reserved for the admin
 * queue) are freed, along with all transmit and receive resources.
 **/
static int i40evf_close(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	if (adapter->state <= __I40EVF_DOWN_PENDING)
		return 0;

	set_bit(__I40E_DOWN, &adapter->vsi.state);

	i40evf_down(adapter);
	adapter->state = __I40EVF_DOWN_PENDING;
	i40evf_free_traffic_irqs(adapter);

	/* We explicitly don't free resources here because the hardware is
	 * still active and can DMA into memory. Resources are cleared in
	 * i40evf_virtchnl_completion() after we get confirmation from the PF
	 * driver that the rings have been stopped.
	 */
	return 0;
}
/**
 * i40evf_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the timer callback.
 **/
static struct net_device_stats *i40evf_get_stats(struct net_device *netdev)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	/* only return the current stats */
	return &adapter->net_stats;
}
/**
 * i40evf_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

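	/* No reconfiguration happens in this context: record the new MTU and
	 * flag a reset, and let the scheduled reset task apply the change.
	 */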
	netdev->mtu = new_mtu;
	adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
	schedule_work(&adapter->reset_task);

	return 0;
}
/**
 * i40evf_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @netdev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t i40evf_features_check(struct sk_buff *skb,
					       struct net_device *dev,
					       netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;
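
	/* The length checks below correspond to the header-length fields the
	 * hardware offload descriptors can encode. Each mask is
	 * (max_count * unit_size): for example, ~(63 * 2) rejects a MAC
	 * header longer than 126 bytes or with an odd length, since MACLEN
	 * is counted in 2-byte words.
	 */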
	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value, and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
			      NETIF_F_HW_VLAN_CTAG_RX |\
			      NETIF_F_HW_VLAN_CTAG_FILTER)

/**
 * i40evf_fix_features - fix up the netdev feature bits
 * @netdev: our net device
 * @features: desired feature bits
 *
 * Returns fixed-up features bits
 **/
static netdev_features_t i40evf_fix_features(struct net_device *netdev,
					     netdev_features_t features)
{
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	features &= ~I40EVF_VLAN_FEATURES;
	if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
		features |= I40EVF_VLAN_FEATURES;

	return features;
}
static const struct net_device_ops i40evf_netdev_ops = {
	.ndo_open		= i40evf_open,
	.ndo_stop		= i40evf_close,
	.ndo_start_xmit		= i40evf_xmit_frame,
	.ndo_get_stats		= i40evf_get_stats,
	.ndo_set_rx_mode	= i40evf_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40evf_set_mac,
	.ndo_change_mtu		= i40evf_change_mtu,
	.ndo_tx_timeout		= i40evf_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40evf_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40evf_vlan_rx_kill_vid,
	.ndo_features_check	= i40evf_features_check,
	.ndo_fix_features	= i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40evf_netpoll,
#endif
};
/**
 * i40evf_check_reset_complete - check that VF reset is complete
 * @hw: pointer to hw struct
 *
 * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
 **/
static int i40evf_check_reset_complete(struct i40e_hw *hw)
{
	u32 rstat;
	int i;

	for (i = 0; i < 100; i++) {
		rstat = rd32(hw, I40E_VFGEN_RSTAT) &
			I40E_VFGEN_RSTAT_VFR_STATE_MASK;
		if ((rstat == I40E_VFR_VFACTIVE) ||
		    (rstat == I40E_VFR_COMPLETED))
			return 0;
		usleep_range(10, 20);
	}
	return -EBUSY;
}
/**
 * i40evf_process_config - Process the config information we got from the PF
 * @adapter: board private structure
 *
 * Verify that we have a valid config struct, and set up our netdev features
 * and our VSI struct.
 **/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
	struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
	struct net_device *netdev = adapter->netdev;
	struct i40e_vsi *vsi = &adapter->vsi;
	int i;

	/* got VF config message back from PF, now we can parse it */
	for (i = 0; i < vfres->num_vsis; i++) {
		if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
			adapter->vsi_res = &vfres->vsi_res[i];
	}
	if (!adapter->vsi_res) {
		dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
		return -ENODEV;
	}

	netdev->hw_enc_features |= NETIF_F_SG			|
				   NETIF_F_SOFT_FEATURES	|
				   NETIF_F_GSO_GRE_CSUM		|
				   NETIF_F_GSO_IPXIP4		|
				   NETIF_F_GSO_IPXIP6		|
				   NETIF_F_GSO_UDP_TUNNEL	|
				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
				   NETIF_F_GSO_PARTIAL		|
				   0;

	if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	/* record features VLANs can make use of */
	netdev->vlan_features |= netdev->hw_enc_features |
				 NETIF_F_TSO_MANGLEID;

	/* Write features and hw_features separately to avoid polluting
	 * them with, or dropping, features that are set when we registered.
	 */
	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	/* disable VLAN features if not supported */
	if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
		netdev->features ^= I40EVF_VLAN_FEATURES;

	adapter->vsi.id = adapter->vsi_res->vsi_id;

	adapter->vsi.back = adapter;
	adapter->vsi.base_vector = 1;
	adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
	vsi->netdev = adapter->netdev;
	vsi->qs_handle = adapter->vsi_res->qset_handle;
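
	/* RSS sizing: when the PF owns RSS for this VF
	 * (I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF), use the key/LUT sizes it
	 * advertised; otherwise fall back to the VF-local defaults.
	 */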
	if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
		adapter->rss_key_size = vfres->rss_key_size;
		adapter->rss_lut_size = vfres->rss_lut_size;
	} else {
		adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
		adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
	}

	return 0;
}
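
/* The init task below is a small state machine driven by adapter->state:
 * __I40EVF_STARTUP -> __I40EVF_INIT_VERSION_CHECK ->
 * __I40EVF_INIT_GET_RESOURCES -> __I40EVF_INIT_SW. The task reschedules
 * itself between states and, when communication with the PF keeps failing,
 * drops back to __I40EVF_STARTUP and retries after a longer delay.
 */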
/**
 * i40evf_init_task - worker thread to perform delayed initialization
 * @work: pointer to work_struct containing our data
 *
 * This task completes the work that was begun in probe. Due to the nature
 * of VF-PF communications, we may need to wait tens of milliseconds to get
 * responses back from the PF. Rather than busy-wait in probe and bog down the
 * whole system, we'll do it in a task so we can sleep.
 * This task only runs during driver init. Once we've established
 * communications with the PF driver and set up our netdev, the watchdog
 * takes over.
 **/
static void i40evf_init_task(struct work_struct *work)
{
	struct i40evf_adapter *adapter = container_of(work,
						      struct i40evf_adapter,
						      init_task.work);
	struct net_device *netdev = adapter->netdev;
	struct i40e_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err, bufsz;

	switch (adapter->state) {
	case __I40EVF_STARTUP:
		/* driver loaded, probe complete */
		adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
		err = i40e_set_mac_type(hw);
		if (err) {
			dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
				err);
			goto err;
		}
		err = i40evf_check_reset_complete(hw);
		if (err) {
			dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
				 err);
			goto err;
		}
		hw->aq.num_arq_entries = I40EVF_AQ_LEN;
		hw->aq.num_asq_entries = I40EVF_AQ_LEN;
		hw->aq.arq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;
		hw->aq.asq_buf_size = I40EVF_MAX_AQ_BUF_SIZE;

		err = i40evf_init_adminq(hw);
		if (err) {
			dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
				err);
			goto err;
		}
		err = i40evf_send_api_ver(adapter);
		if (err) {
			dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
			i40evf_shutdown_adminq(hw);
			goto err;
		}
		adapter->state = __I40EVF_INIT_VERSION_CHECK;
		goto restart;
	case __I40EVF_INIT_VERSION_CHECK:
		if (!i40evf_asq_done(hw)) {
			dev_err(&pdev->dev, "Admin queue command never completed\n");
			i40evf_shutdown_adminq(hw);
			adapter->state = __I40EVF_STARTUP;
			goto err;
		}

		/* aq msg sent, awaiting reply */
		err = i40evf_verify_api_ver(adapter);
		if (err) {
			if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
				err = i40evf_send_api_ver(adapter);
			else
				dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
					adapter->pf_version.major,
					adapter->pf_version.minor,
					I40E_VIRTCHNL_VERSION_MAJOR,
					I40E_VIRTCHNL_VERSION_MINOR);
			goto err;
		}
		err = i40evf_send_vf_config_msg(adapter);
		if (err) {
			dev_err(&pdev->dev, "Unable to send config request (%d)\n",
				err);
			goto err;
		}
		adapter->state = __I40EVF_INIT_GET_RESOURCES;
		goto restart;
	case __I40EVF_INIT_GET_RESOURCES:
		/* aq msg sent, awaiting reply */
		if (!adapter->vf_res) {
			bufsz = sizeof(struct i40e_virtchnl_vf_resource) +
				(I40E_MAX_VF_VSI *
				 sizeof(struct i40e_virtchnl_vsi_resource));
			adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
			if (!adapter->vf_res)
				goto err;
		}
		err = i40evf_get_vf_config(adapter);
		if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
			err = i40evf_send_vf_config_msg(adapter);
			goto err;
		} else if (err == I40E_ERR_PARAM) {
			/* We only get ERR_PARAM if the device is in a very bad
			 * state or if we've been disabled for previous bad
			 * behavior. Either way, we're done now.
			 */
			i40evf_shutdown_adminq(hw);
			dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
			return;
		}
		if (err) {
			dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
				err);
			goto err_alloc;
		}
		adapter->state = __I40EVF_INIT_SW;
		break;
	default:
		goto err_alloc;
	}

	if (hw->mac.type == I40E_MAC_X722_VF)
		adapter->flags |= I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE;

	if (i40evf_process_config(adapter))
		goto err_alloc;
	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;

	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;

	netdev->netdev_ops = &i40evf_netdev_ops;
	i40evf_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	/* MTU range: 68 - 9710 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - (ETH_HLEN + ETH_FCS_LEN);
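	/* I40E_MAX_RXBUFFER is the largest frame the device can receive;
	 * subtracting the Ethernet header and FCS gives the 9710-byte upper
	 * bound noted in the MTU range comment above.
	 */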

	if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
		dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
			 adapter->hw.mac.addr);
		eth_hw_addr_random(netdev);
		ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
	} else {
		adapter->flags |= I40EVF_FLAG_ADDR_SET_BY_PF;
		ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
		ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
	}

	init_timer(&adapter->watchdog_timer);
	adapter->watchdog_timer.function = &i40evf_watchdog_timer;
	adapter->watchdog_timer.data = (unsigned long)adapter;
	mod_timer(&adapter->watchdog_timer, jiffies + 1);

	adapter->num_active_queues = min_t(int,
					   adapter->vsi_res->num_queue_pairs,
					   (int)(num_online_cpus()));
	adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
	adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
	err = i40evf_init_interrupt_scheme(adapter);
	if (err)
		goto err_sw_init;
	i40evf_map_rings_to_vectors(adapter);
	if (adapter->vf_res->vf_offload_flags &
	    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;

	err = i40evf_request_misc_irq(adapter);
	if (err)
		goto err_sw_init;

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!adapter->netdev_registered) {
		err = register_netdev(netdev);
		if (err)
			goto err_register;
	}

	adapter->netdev_registered = true;

	netif_tx_stop_all_queues(netdev);

	dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
	if (netdev->features & NETIF_F_GRO)
		dev_info(&pdev->dev, "GRO is enabled\n");

	adapter->state = __I40EVF_DOWN;
	set_bit(__I40E_DOWN, &adapter->vsi.state);
	i40evf_misc_irq_enable(adapter);

	adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
	adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
	if (!adapter->rss_key || !adapter->rss_lut)
		goto err_mem;

	if (RSS_AQ(adapter)) {
		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
	} else {
		i40evf_init_rss(adapter);
	}
	return;
restart:
	schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
	return;
err_mem:
	i40evf_free_rss(adapter);
err_register:
	i40evf_free_misc_irq(adapter);
err_sw_init:
	i40evf_reset_interrupt_capability(adapter);
err_alloc:
	kfree(adapter->vf_res);
	adapter->vf_res = NULL;
err:
	/* Things went into the weeds, so try again later */
	if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
		dev_err(&pdev->dev, "Failed to communicate with PF; waiting before retry\n");
		adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
		i40evf_shutdown_adminq(hw);
		adapter->state = __I40EVF_STARTUP;
		schedule_delayed_work(&adapter->init_task, HZ * 5);
		return;
	}
	schedule_delayed_work(&adapter->init_task, HZ);
}
/**
 * i40evf_shutdown - Shutdown the device in preparation for a reboot
 * @pdev: pci device structure
 **/
static void i40evf_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (netif_running(netdev))
		i40evf_close(netdev);

	/* Prevent the watchdog from running. */
	adapter->state = __I40EVF_REMOVE;
	adapter->aq_required = 0;

#ifdef CONFIG_PM
	pci_save_state(pdev);

#endif
	pci_disable_device(pdev);
}
/**
 * i40evf_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in i40evf_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * i40evf_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct i40evf_adapter *adapter = NULL;
	struct i40e_hw *hw = NULL;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_regions(pdev, i40evf_driver_name);
	if (err) {
		dev_err(&pdev->dev,
			"pci_request_regions failed 0x%x\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	netdev = alloc_etherdev_mq(sizeof(struct i40evf_adapter), MAX_QUEUES);
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);

	adapter->netdev = netdev;
	adapter->pdev = pdev;

	hw = &adapter->hw;
	hw->back = adapter;

	adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
	adapter->state = __I40EVF_STARTUP;

	/* Call save state here because it relies on the adapter struct. */
	pci_save_state(pdev);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
			      pci_resource_len(pdev, 0));
	if (!hw->hw_addr) {
		err = -EIO;
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	INIT_LIST_HEAD(&adapter->mac_filter_list);
	INIT_LIST_HEAD(&adapter->vlan_filter_list);

	INIT_WORK(&adapter->reset_task, i40evf_reset_task);
	INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
	INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
	INIT_DELAYED_WORK(&adapter->init_task, i40evf_init_task);
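	/* The first run of init_task is staggered by PCI function number,
	 * presumably so that multiple VFs probed at once do not all hit the
	 * PF's mailbox at the same instant.
	 */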
	schedule_delayed_work(&adapter->init_task,
			      msecs_to_jiffies(5 * (pdev->devfn & 0x07)));

	return 0;

err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
#ifdef CONFIG_PM
/**
 * i40evf_suspend - Power management suspend routine
 * @pdev: PCI device information struct
 * @state: power state being entered
 *
 * Called when the system (VM) is entering sleep/suspend.
 **/
static int i40evf_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int retval = 0;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		i40evf_down(adapter);
		rtnl_unlock();
	}
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_disable_device(pdev);

	return 0;
}
/**
 * i40evf_resume - Power management resume routine
 * @pdev: PCI device information struct
 *
 * Called when the system (VM) is resumed from sleep/suspend.
 **/
static int i40evf_resume(struct pci_dev *pdev)
{
	/* drvdata holds the netdev (set in probe); derive the adapter from
	 * it, as the other PCI callbacks do.
	 */
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state clears dev->state_saved so call
	 * pci_save_state to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend.\n");
		return err;
	}
	pci_set_master(pdev);

	rtnl_lock();
	err = i40evf_set_interrupt_capability(adapter);
	if (err) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
		return err;
	}
	err = i40evf_request_misc_irq(adapter);
	rtnl_unlock();
	if (err) {
		dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
		return err;
	}

	schedule_work(&adapter->reset_task);

	netif_device_attach(netdev);

	return err;
}

#endif /* CONFIG_PM */
/**
 * i40evf_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * i40evf_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40evf_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct i40evf_adapter *adapter = netdev_priv(netdev);
	struct i40evf_mac_filter *f, *ftmp;
	struct i40e_hw *hw = &adapter->hw;

	cancel_delayed_work_sync(&adapter->init_task);
	cancel_work_sync(&adapter->reset_task);

	if (adapter->netdev_registered) {
		unregister_netdev(netdev);
		adapter->netdev_registered = false;
	}

	/* Shut down all the garbage mashers on the detention level */
	adapter->state = __I40EVF_REMOVE;
	adapter->aq_required = 0;
	i40evf_request_reset(adapter);
	msleep(50);
	/* If the FW isn't responding, kick it once, but only once. */
	if (!i40evf_asq_done(hw)) {
		i40evf_request_reset(adapter);
		msleep(50);
	}
	i40evf_free_all_tx_resources(adapter);
	i40evf_free_all_rx_resources(adapter);
	i40evf_misc_irq_disable(adapter);
	i40evf_free_misc_irq(adapter);
	i40evf_reset_interrupt_capability(adapter);
	i40evf_free_q_vectors(adapter);

	if (adapter->watchdog_timer.function)
		del_timer_sync(&adapter->watchdog_timer);

	flush_scheduled_work();

	i40evf_free_rss(adapter);

	if (hw->aq.asq.count)
		i40evf_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	iounmap(hw->hw_addr);
	pci_release_regions(pdev);
	i40evf_free_all_tx_resources(adapter);
	i40evf_free_all_rx_resources(adapter);
	i40evf_free_queues(adapter);
	kfree(adapter->vf_res);
	/* If we got removed before an up/down sequence, we've got a filter
	 * hanging out there that we need to get rid of.
	 */
	list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}
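	/* The VLAN filter list is walked with the same iterator type; this
	 * works as long as the embedded list_head sits at the same offset in
	 * both filter structures (an assumption about the driver's filter
	 * definitions), so list_del() and kfree() see the right object.
	 */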
	list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
		list_del(&f->list);
		kfree(f);
	}

	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
static struct pci_driver i40evf_driver = {
	.name		= i40evf_driver_name,
	.id_table	= i40evf_pci_tbl,
	.probe		= i40evf_probe,
	.remove		= i40evf_remove,
#ifdef CONFIG_PM
	.suspend	= i40evf_suspend,
	.resume		= i40evf_resume,
#endif
	.shutdown	= i40evf_shutdown,
};
/**
 * i40evf_init_module - Driver Registration Routine
 *
 * i40evf_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40evf_init_module(void)
{
	int ret;

	pr_info("i40evf: %s - version %s\n", i40evf_driver_string,
		i40evf_driver_version);

	pr_info("%s\n", i40evf_copyright);

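	/* Use a dedicated workqueue instead of the system one: WQ_MEM_RECLAIM
	 * guarantees forward progress under memory pressure, and a max_active
	 * of one keeps the driver's queued work items from running
	 * concurrently.
	 */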
	i40evf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				    i40evf_driver_name);
	if (!i40evf_wq) {
		pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
		return -ENOMEM;
	}
	ret = pci_register_driver(&i40evf_driver);

	return ret;
}

module_init(i40evf_init_module);
/**
 * i40evf_exit_module - Driver Exit Cleanup Routine
 *
 * i40evf_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40evf_exit_module(void)
{
	pci_unregister_driver(&i40evf_driver);
	destroy_workqueue(i40evf_wq);
}

module_exit(i40evf_exit_module);