/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";
#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 6
#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *i40e_wq;
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
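/* Note: ALIGN() rounds the request up to a multiple of the alignment,
 * so mem->size can exceed the size asked for; e.g. a 1500-byte request
 * with 4096-byte alignment results in a 4096-byte allocation.
 */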
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
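/* Illustrative example (assuming search_hint starts at 0): with
 * needed = 4 and entries 0-2 already valid, the scan skips 0-2 one at
 * a time, finds entries 3-6 free, tags each with
 * (id | I40E_PILE_VALID_BIT), returns base index 3, and leaves
 * search_hint at 7 for the next request.
 */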
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf - the pf structure to search for the vsi
 * @id - id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
	    !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
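/* The test_and_set_bit() above makes scheduling idempotent: only the
 * caller that flips __I40E_SERVICE_SCHED from 0 to 1 queues the work,
 * so concurrent callers cannot enqueue the service task twice.
 */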
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
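/* Repeated timeouts escalate the recovery: level 1 requests a PF
 * reset, level 2 a core reset, level 3 a global reset, and anything
 * beyond that is reported as unsuccessful.  The level drops back to 1
 * once 20 seconds pass without another hang.
 */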
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;

	return stats;
}
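/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loops
 * above re-read each ring's packet/byte counters until a consistent
 * snapshot is observed, letting the hot path update the counters
 * without a lock even on 32-bit SMP kernels.
 */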
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
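/* Rollover example: if *offset is 0xFFFFFFFFFFF0 and the 48-bit
 * counter wraps to 0x10, new_data < *offset, so the stat becomes
 * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, i.e. the 0x20 events
 * that actually occurred since the offset was captured.
 */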
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's eth stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	u64 tx_lost_interrupt;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	tx_lost_interrupt = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;
		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->tx_lost_interrupt = tx_lost_interrupt;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_atr_match, &nsd->fd_atr_match);
	i40e_stat_update32(hw,
			   I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
			   pf->stat_offsets_loaded,
			   &osd->fd_sb_match, &nsd->fd_sb_match);
	i40e_stat_update32(hw,
		      I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
		      pf->stat_offsets_loaded,
		      &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
#ifdef I40E_FCOE
	i40e_update_fcoe_stats(vsi);
#endif
}
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						u8 *macaddr, s16 vlan,
						bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan)    &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
				      bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return NULL;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (!is_vf || f->is_vf) &&
		    (!is_netdev || f->is_netdev))
			return f;
	}
	return NULL;
}
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;

	/* Only -1 for all the filters denotes not in vlan mode
	 * so we have to go through all the list in order to make sure
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (f->vlan >= 0 || vsi->info.pvid)
			return true;
	}

	return false;
}
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}
/**
 * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Removes a given MAC address from a VSI, regardless of VLAN
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
			  bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f = NULL;
	int changed = 0;

	WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
	     "Missing mac_filter_list_lock\n");
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (is_vf == f->is_vf) &&
		    (is_netdev == f->is_netdev)) {
			f->counter--;
			changed = 1;
			if (f->counter == 0)
				f->state = I40E_FILTER_REMOVE;
		}
	}
	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		return 0;
	}
	return -ENOENT;
}
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure its a VF filter, else doesn't matter
 * @is_netdev: make sure its a netdev filter, else doesn't matter
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					u8 *macaddr, s16 vlan,
					bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;
	int changed = false;

	if (!vsi || !macaddr)
		return NULL;

	/* Do not allow broadcast filter to be added since broadcast filter
	 * is added as part of add VSI for any newly created VSI except
	 * FDIR VSI
	 */
	if (is_broadcast_ether_addr(macaddr))
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			goto add_filter_out;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		changed = true;
		INIT_LIST_HEAD(&f->list);
		list_add_tail(&f->list, &vsi->mac_filter_list);
	}

	/* increment counter and add a new flag if needed */
	if (is_vf) {
		if (!f->is_vf) {
			f->is_vf = true;
			f->counter++;
		}
	} else if (is_netdev) {
		if (!f->is_netdev) {
			f->is_netdev = true;
			f->counter++;
		}
	} else {
		f->counter++;
	}

	if (changed) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

add_filter_out:
	return f;
}
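/* f->counter tracks how many consumers (VF, netdev, or neither)
 * reference the filter; each flag is counted at most once, so adding
 * the same MAC/VLAN twice for the same consumer does not inflate it.
 */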
/**
 * i40e_del_filter - Remove a mac/vlan filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 * @is_vf: make sure it's a VF filter, else doesn't matter
 * @is_netdev: make sure it's a netdev filter, else doesn't matter
 *
 * NOTE: This function is expected to be called with mac_filter_list_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi,
		     u8 *macaddr, s16 vlan,
		     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
	if (!f || f->counter == 0)
		return;

	if (is_vf) {
		if (f->is_vf) {
			f->is_vf = false;
			f->counter--;
		}
	} else if (is_netdev) {
		if (f->is_netdev) {
			f->is_netdev = false;
			f->counter--;
		}
	} else {
		/* make sure we don't remove a filter in use by VF or netdev */
		int min_f = 0;

		min_f += (f->is_vf ? 1 : 0);
		min_f += (f->is_netdev ? 1 : 0);

		if (f->counter > min_f)
			f->counter--;
	}

	/* counter == 0 tells sync_filters_subtask to
	 * remove the filter from the firmware's list
	 */
	if (f->counter == 0) {
		if ((f->state == I40E_FILTER_FAILED) ||
		    (f->state == I40E_FILTER_NEW)) {
			/* this one never got added by the FW. Just remove it,
			 * no need to sync anything.
			 */
			list_del(&f->list);
			kfree(f);
		} else {
			f->state = I40E_FILTER_REMOVE;
			vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
			vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
		}
	}
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
#ifdef I40E_FCOE
int i40e_set_mac(struct net_device *netdev, void *p)
#else
static int i40e_set_mac(struct net_device *netdev, void *p)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true);
	i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true);
	spin_unlock_bh(&vsi->mac_filter_list_lock);
	ether_addr_copy(netdev->dev_addr, addr->sa_data);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
#ifdef I40E_FCOE
void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
			      struct i40e_vsi_context *ctxt,
			      u8 enabled_tc,
			      bool is_add)
#else
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
#endif
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 0;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
	} else {
		/* At least TC0 is enabled in case of non-DCB case */
		numtc = 1;
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	/* Number of queues per enabled TC */
	qcount = vsi->alloc_queue_pairs;

	num_tc_qps = qcount / numtc;
	num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				qcount = min_t(int, pf->alloc_rss_size,
					       num_tc_qps);
				break;
#ifdef I40E_FCOE
			case I40E_VSI_FCOE:
				qcount = num_tc_qps;
				break;
#endif
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
#ifdef I40E_FCOE
void i40e_set_rx_mode(struct net_device *netdev)
#else
static void i40e_set_rx_mode(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_vsi *vsi = np->vsi;
	struct netdev_hw_addr *uca;
	struct netdev_hw_addr *mca;
	struct netdev_hw_addr *ha;

	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* add addr if not already in the filter list */
	netdev_for_each_uc_addr(uca, netdev) {
		if (!i40e_find_mac(vsi, uca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, uca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	netdev_for_each_mc_addr(mca, netdev) {
		if (!i40e_find_mac(vsi, mca->addr, false, true)) {
			if (i40e_is_vsi_in_vlan(vsi))
				i40e_put_mac_in_vlan(vsi, mca->addr,
						     false, true);
			else
				i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
						false, true);
		}
	}

	/* remove filter if not in netdev list */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {

		if (!f->is_netdev)
			continue;

		netdev_for_each_mc_addr(mca, netdev)
			if (ether_addr_equal(mca->addr, f->macaddr))
				goto bottom_of_search_loop;

		netdev_for_each_uc_addr(uca, netdev)
			if (ether_addr_equal(uca->addr, f->macaddr))
				goto bottom_of_search_loop;

		for_each_dev_addr(netdev, ha)
			if (ether_addr_equal(ha->addr, f->macaddr))
				goto bottom_of_search_loop;

		/* f->macaddr wasn't found in uc, mc, or ha list so delete it */
		i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);

bottom_of_search_loop:
		continue;
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}
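/* Note that nothing is pushed to the hardware here: changed filters
 * are only flagged via I40E_VSI_FLAG_FILTER_CHANGED and
 * I40E_FLAG_FILTER_SYNC, and the service task applies them later
 * through i40e_sync_vsi_filters(), keeping this callback cheap.
 */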
/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from list were slated to be removed from device.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct list_head *from)
{
	struct i40e_mac_filter *f, *ftmp;

	list_for_each_entry_safe(f, ftmp, from, list) {
		/* Move the element back into MAC filter list*/
		list_move_tail(&f->list, &vsi->mac_filter_list);
	}
}
/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @head: pointer to first filter in current batch
 * @aq_err: status from fw
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 **/
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_mac_filter *add_head, int aq_err)
{
	int retval = 0;
	int i;

	if (!aq_err) {
		retval = count;
		/* Everything's good, mark all filters active. */
		for (i = 0; i < count ; i++) {
			add_head->state = I40E_FILTER_ACTIVE;
			add_head = list_next_entry(add_head, list);
		}
	} else if (aq_err == I40E_AQ_RC_ENOSPC) {
		/* Device ran out of filter space. Check the return value
		 * for each filter to see which ones are active.
		 */
		for (i = 0; i < count ; i++) {
			if (add_list[i].match_method ==
			    I40E_AQC_MM_ERR_NO_RES) {
				add_head->state = I40E_FILTER_FAILED;
			} else {
				add_head->state = I40E_FILTER_ACTIVE;
				retval++;
			}
			add_head = list_next_entry(add_head, list);
		}
	} else {
		/* Some other horrible thing happened, fail all filters */
		retval = 0;
		for (i = 0; i < count ; i++) {
			add_head->state = I40E_FILTER_FAILED;
			add_head = list_next_entry(add_head, list);
		}
	}
	return retval;
}
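/* Example: if 10 filters were sent and the firmware returns ENOSPC
 * with 3 entries marked I40E_AQC_MM_ERR_NO_RES, this returns 7; the
 * caller compares that against the count it sent to decide whether
 * to fall back to promiscuous mode.
 */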
1820 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1821 * @vsi: ptr to the VSI
1823 * Push any outstanding VSI filter changes through the AdminQ.
1825 * Returns 0 or error value
1827 int i40e_sync_vsi_filters(struct i40e_vsi
*vsi
)
1829 struct i40e_mac_filter
*f
, *ftmp
, *add_head
= NULL
;
1830 struct list_head tmp_add_list
, tmp_del_list
;
1831 struct i40e_hw
*hw
= &vsi
->back
->hw
;
1832 bool promisc_changed
= false;
1833 char vsi_name
[16] = "PF";
1834 int filter_list_len
= 0;
1835 u32 changed_flags
= 0;
1836 i40e_status aq_ret
= 0;
1846 /* empty array typed pointers, kcalloc later */
1847 struct i40e_aqc_add_macvlan_element_data
*add_list
;
1848 struct i40e_aqc_remove_macvlan_element_data
*del_list
;
1850 while (test_and_set_bit(__I40E_CONFIG_BUSY
, &vsi
->state
))
1851 usleep_range(1000, 2000);
1855 changed_flags
= vsi
->current_netdev_flags
^ vsi
->netdev
->flags
;
1856 vsi
->current_netdev_flags
= vsi
->netdev
->flags
;
1859 INIT_LIST_HEAD(&tmp_add_list
);
1860 INIT_LIST_HEAD(&tmp_del_list
);
1862 if (vsi
->type
== I40E_VSI_SRIOV
)
1863 snprintf(vsi_name
, sizeof(vsi_name
) - 1, "VF %d", vsi
->vf_id
);
1864 else if (vsi
->type
!= I40E_VSI_MAIN
)
1865 snprintf(vsi_name
, sizeof(vsi_name
) - 1, "vsi %d", vsi
->seid
);
1867 if (vsi
->flags
& I40E_VSI_FLAG_FILTER_CHANGED
) {
1868 vsi
->flags
&= ~I40E_VSI_FLAG_FILTER_CHANGED
;
1870 spin_lock_bh(&vsi
->mac_filter_list_lock
);
1871 /* Create a list of filters to delete. */
1872 list_for_each_entry_safe(f
, ftmp
, &vsi
->mac_filter_list
, list
) {
1873 if (f
->state
== I40E_FILTER_REMOVE
) {
1874 WARN_ON(f
->counter
!= 0);
1875 /* Move the element into temporary del_list */
1876 list_move_tail(&f
->list
, &tmp_del_list
);
1877 vsi
->active_filters
--;
1879 if (f
->state
== I40E_FILTER_NEW
) {
1880 WARN_ON(f
->counter
== 0);
1881 /* Move the element into temporary add_list */
1882 list_move_tail(&f
->list
, &tmp_add_list
);
1885 spin_unlock_bh(&vsi
->mac_filter_list_lock
);
	/* Now process 'del_list' outside the lock */
	if (!list_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list) {
			/* Undo VSI's MAC filter entry element updates */
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_undo_del_filter_entries(vsi, &tmp_del_list);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			retval = -ENOMEM;
			goto out;
		}

		list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
			cmd_flags = 0;

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
								del_list,
								num_del, NULL);
				aq_err = hw->aq.asq_last_status;
				num_del = 0;
				memset(del_list, 0, list_size);

				/* Explicitly ignore and do not report when
				 * firmware returns ENOENT.
				 */
				if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
					retval = -EIO;
					dev_info(&pf->pdev->dev,
						 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw, aq_err));
				}
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			list_del(&f->list);
			kfree(f);
		}

		if (num_del) {
			aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
							num_del, NULL);
			aq_err = hw->aq.asq_last_status;
			num_del = 0;

			/* Explicitly ignore and do not report when firmware
			 * returns ENOENT.
			 */
			if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
				retval = -EIO;
				dev_info(&pf->pdev->dev,
					 "ignoring delete macvlan error on %s, err %s aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw, aq_err));
			}
		}

		kfree(del_list);
		del_list = NULL;
	}
	if (!list_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list) {
			retval = -ENOMEM;
			goto out;
		}
		num_add = 0;
		list_for_each_entry(f, &tmp_add_list, list) {
			if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
				     &vsi->state)) {
				f->state = I40E_FILTER_FAILED;
				continue;
			}
			/* add to add array */
			if (num_add == 0)
				add_head = f;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}
			add_list[num_add].queue_number = 0;

			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
							     add_list, num_add,
							     NULL);
				aq_err = hw->aq.asq_last_status;
				fcnt = i40e_update_filter_state(num_add,
								add_list,
								add_head,
								aq_ret);
				vsi->active_filters += fcnt;

				if (fcnt != num_add) {
					promisc_changed = true;
					set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
						&vsi->state);
					vsi->promisc_threshold =
						(vsi->active_filters * 3) / 4;
					dev_warn(&pf->pdev->dev,
						 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
						 i40e_aq_str(hw, aq_err),
						 vsi_name);
				}
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
						     add_list, num_add, NULL);
			aq_err = hw->aq.asq_last_status;
			fcnt = i40e_update_filter_state(num_add, add_list,
							add_head, aq_ret);
			vsi->active_filters += fcnt;
			if (fcnt != num_add) {
				promisc_changed = true;
				set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state);
				vsi->promisc_threshold =
					(vsi->active_filters * 3) / 4;
				dev_warn(&pf->pdev->dev,
					 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
					 i40e_aq_str(hw, aq_err), vsi_name);
			}
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
			list_move_tail(&f->list, &vsi->mac_filter_list);
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		kfree(add_list);
		add_list = NULL;
	}
	/* Check to see if we can drop out of overflow promiscuous mode. */
	if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		int failed_count = 0;

		/* See if we have any failed filters. We can't drop out of
		 * promiscuous until these have all been deleted.
		 */
		spin_lock_bh(&vsi->mac_filter_list_lock);
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (f->state == I40E_FILTER_FAILED)
				failed_count++;
		}
		spin_unlock_bh(&vsi->mac_filter_list_lock);
		if (!failed_count) {
			dev_info(&pf->pdev->dev,
				 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
				 vsi_name);
			clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
			promisc_changed = true;
			vsi->promisc_threshold = 0;
		}
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
		goto out;
	}
	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
	if ((changed_flags & IFF_PROMISC) ||
	    (promisc_changed &&
	     test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
					&vsi->state));
		if ((vsi->type == I40E_VSI_MAIN) &&
		    (pf->lan_veb != I40E_NO_VEB) &&
		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
			/* set defport ON for Main VSI instead of true promisc
			 * this way we will get all unicast/multicast and VLAN
			 * promisc behavior but will not get VF or VMDq traffic
			 * replicated on the Main VSI.
			 */
			if (pf->cur_promisc != cur_promisc) {
				pf->cur_promisc = cur_promisc;
				if (cur_promisc)
					aq_ret =
					      i40e_aq_set_default_vsi(hw,
								      vsi->seid,
								      NULL);
				else
					aq_ret =
					    i40e_aq_clear_default_vsi(hw,
								      vsi->seid,
								      NULL);
				if (aq_ret) {
					retval = i40e_aq_rc_to_posix(aq_ret,
							hw->aq.asq_last_status);
					dev_info(&pf->pdev->dev,
						 "Set default VSI failed on %s, err %s, aq_err %s\n",
						 vsi_name,
						 i40e_stat_str(hw, aq_ret),
						 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
				}
			}
		} else {
			aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL,
							  true);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set unicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
			aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
							  hw,
							  vsi->seid,
							  cur_promisc, NULL);
			if (aq_ret) {
				retval =
				i40e_aq_rc_to_posix(aq_ret,
						    hw->aq.asq_last_status);
				dev_info(&pf->pdev->dev,
					 "set multicast promisc failed on %s, err %s, aq_err %s\n",
					 vsi_name,
					 i40e_stat_str(hw, aq_ret),
					 i40e_aq_str(hw,
						     hw->aq.asq_last_status));
			}
		}
		aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
						   vsi->seid,
						   cur_promisc, NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     pf->hw.aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw,
					     hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
	return retval;
}
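
/*
 * Usage sketch (illustrative, based on how this file drives the sync):
 * hot paths never call i40e_sync_vsi_filters() directly. A path that
 * edits the MAC filter list marks the VSI and PF dirty and lets the
 * service task do the AdminQ work, roughly:
 *
 *	spin_lock_bh(&vsi->mac_filter_list_lock);
 *	... add/remove struct i40e_mac_filter entries ...
 *	spin_unlock_bh(&vsi->mac_filter_list_lock);
 *	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
 *	pf->flags |= I40E_FLAG_FILTER_SYNC;
 *	i40e_service_event_schedule(vsi->back);
 *
 * i40e_sync_filters_subtask() below then picks up every VSI that has
 * I40E_VSI_FLAG_FILTER_CHANGED set.
 */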
/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				pf->flags |= I40E_FLAG_FILTER_SYNC;
				break;
			}
		}
	}
}
/**
 * i40e_change_mtu - NDO callback to change the Maximum Transmission Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct i40e_vsi *vsi = np->vsi;

	/* MTU < 68 is an error and causes problems on some kernels */
	if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
		return -EINVAL;

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	i40e_notify_client_of_l2_param_changes(vsi);
	return 0;
}
/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
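
/*
 * Example (userspace side, illustrative only): the branches above service
 * the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP requests, so a program would
 * enable Tx timestamping with something like:
 *
 *	struct hwtstamp_config cfg = { .tx_type = HWTSTAMP_TX_ON };
 *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * "eth0" and sock_fd are placeholders for the caller's device and socket.
 */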
/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}
/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}
/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *ftmp, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not all tags along with untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					      is_vf, is_netdev))
				continue;
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr,
						0, is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
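
/*
 * Filter semantics worked example (illustrative): with one MAC filter on
 * the VSI and no VLANs, the list holds a single (mac, I40E_VLAN_ANY)
 * entry, i.e. vlan == -1, matching tagged and untagged traffic alike.
 * After i40e_vsi_add_vlan(vsi, 100) the -1 entry is replaced by (mac, 0)
 * plus (mac, 100), so the port now accepts untagged frames and VLAN 100
 * only, rather than every tag.
 */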
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *ftmp, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		if (is_netdev) {
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		} else if (f->vlan) {
			filter_count++;
		}
	}

	if (!filter_count && is_netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	if (!filter_count) {
		list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/
#ifdef I40E_FCOE
int i40e_vlan_rx_add_vid(struct net_device *netdev,
			 __always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid > 4095)
		return -EINVAL;

	/* If the network stack called us with vid = 0 then
	 * it is asking to receive priority tagged packets with
	 * vlan id 0. Our HW receives them by default when configured
	 * to receive untagged packets so there is no need to add an
	 * extra filter for vlan 0 tagged packets.
	 */
	if (vid)
		ret = i40e_vsi_add_vlan(vsi, vid);

	if (!ret && (vid < VLAN_N_VID))
		set_bit(vid, vsi->active_vlans);

	return ret;
}
/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
#ifdef I40E_FCOE
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
			  __always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}
/**
 * i40e_macaddr_init - explicitly write the mac address filters
 *
 * @vsi: pointer to the vsi
 * @macaddr: the MAC address
 *
 * This is needed when the macaddr has been obtained by other
 * means than the default, e.g., from Open Firmware or IDPROM.
 * Returns 0 on success, negative on failure
 **/
static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
{
	int ret;
	struct i40e_aqc_add_macvlan_element_data element;

	ret = i40e_aq_mac_address_write(&vsi->back->hw,
					I40E_AQC_WRITE_TYPE_LAA_WOL,
					macaddr, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Addr change for VSI failed: %d\n", ret);
		return -EADDRNOTAVAIL;
	}

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
	ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add filter failed err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
	return ret;
}
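
/*
 * A caller in this file, i40e_set_vsi_rx_mode() further down, applies a
 * platform-provided address this way when I40E_FLAG_PF_MAC is set:
 *
 *	err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
 *
 * Any other address source (e.g. Open Firmware, as the kernel-doc above
 * notes) would be wired up the same way; the call here is illustrative.
 */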
/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}
/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}
/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}
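
/*
 * PVID vs. VLAN stripping, in brief: i40e_vsi_add_pvid() makes the VSI a
 * port-VLAN member (the tag is inserted on Tx and stripped on Rx), which
 * is how an administrator pins a VF to one VLAN. Conceptually:
 *
 *	ret = i40e_vsi_add_pvid(vf_vsi, 100);	// illustrative call
 *	...
 *	i40e_vsi_remove_pvid(vf_vsi);		// back to normal stripping
 *
 * vf_vsi is a placeholder for the VF's VSI pointer.
 */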
/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	return err;
}
/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			i40e_free_tx_resources(vsi->tx_rings[i]);
}
/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_setup_ddp_resources(vsi);
#endif
	return err;
}
/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
#ifdef I40E_FCOE
	i40e_fcoe_free_ddp_resources(vsi);
#endif
}
/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	cpumask_var_t mask;

	if (!ring->q_vector || !ring->netdev)
		return;

	/* Single TC mode enable XPS */
	if (vsi->tc_config.numtc <= 1) {
		if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
			netif_set_xps_queue(ring->netdev,
					    &ring->q_vector->affinity_mask,
					    ring->queue_index);
	} else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		/* Disable XPS to allow selection based on TC */
		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}
}
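
/*
 * Effect of the XPS setup above (illustrative): in single-TC mode each Tx
 * ring is pinned to its vector's CPU mask, so an admin would see a
 * non-empty mask in sysfs, e.g.:
 *
 *	$ cat /sys/class/net/eth0/queues/tx-0/xps_cpus
 *	00000001
 *
 * With multiple TCs the mask is cleared to zero so queue selection can
 * follow the traffic class instead. "eth0" is a placeholder name.
 */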
/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
#ifdef I40E_FCOE
	tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (vsi->type == I40E_VSI_VMDQ2) {
		qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			   I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}
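
/*
 * Head write-back placement, worked through: the descriptor ring occupies
 * ring->count * sizeof(struct i40e_tx_desc) bytes starting at ring->dma,
 * so head_wb_addr above lands on the first byte immediately after the
 * ring. For a 512-descriptor ring with 16-byte descriptors that is
 * ring->dma + 8192; the sizes here are illustrative, not configuration
 * advice.
 */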
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;

	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
#ifdef I40E_FCOE
	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}
/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	return err;
}
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
			       + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = I40E_RXBUFFER_2048;

	vsi->rx_buf_len = I40E_RXBUFFER_2048;

#ifdef I40E_FCOE
	/* setup rx buffer for FCoE */
	if ((vsi->type == I40E_VSI_FCOE) &&
	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
		vsi->rx_buf_len = I40E_RXBUFFER_3072;
		vsi->max_frame = I40E_RXBUFFER_3072;
	}

#endif /* I40E_FCOE */
	/* round up for the chip's needs */
	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
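
/*
 * Frame-size arithmetic above, by example: for an MTU of 9000 the VSI
 * allows max_frame = 9000 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
 * VLAN_HLEN (4) = 9022 bytes; for a default 1500-byte MTU the 2048-byte
 * single-buffer default already covers the whole frame.
 */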
/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
	}

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}
/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);

	if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
		err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
		if (err) {
			dev_warn(&pf->pdev->dev,
				 "could not set up macaddr; err %d\n", err);
		}
	}
}
/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     INTRL_USEC_TO_REG(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX
			       << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX
			       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					<< I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
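
/*
 * Resulting interrupt topology (illustrative, two queue pairs on one
 * vector): PFINT_LNKLSTN(v-1) points at qp0's Rx cause; each RQCTL chains
 * to the Tx cause of the same pair and each TQCTL chains to the Rx cause
 * of the next pair, until the last TQCTL carries I40E_QUEUE_END_OF_LIST:
 *
 *	LNKLSTN -> RQCTL(qp0) -> TQCTL(qp0) -> RQCTL(qp1) -> TQCTL(qp1) -> EOL
 */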
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}
/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 * @clearpba: true when all pending interrupt events should be cleared
 **/
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	      (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}
/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(pf->msix_entries[base + vector].vector,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}
		/* assign the mask for this irq */
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      &q_vector->affinity_mask);
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
				      NULL);
		free_irq(pf->msix_entries[base + vector].vector,
			 vsi->q_vectors[vector]);
	}
	return err;
}
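
/*
 * The names built above are what show up in /proc/interrupts; for a
 * basename of "i40e-eth0" (illustrative) the first combined vector is
 * "i40e-eth0-TxRx-0", which makes it straightforward to pin a queue
 * pair's IRQ from userspace alongside the affinity hint set here.
 */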
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf, true);
	}

	i40e_flush(&pf->hw);
	return 0;
}
/**
 * i40e_stop_misc_vector - Stop the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_stop_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);
}
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to the PF structure
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm Queue causes while leaving
		 * interrupt enabled for all other causes, ideally
		 * interrupt should be disabled while we are in NAPI but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf, false);
	}

	return ret;
}
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (i.e. the clean finished
 * before the budget was exhausted)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		read_barrier_depends();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
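
/*
 * Index trick above, explained by example: i is biased by -tx_ring->count
 * so the ring position can be advanced with a plain i++ and the wrap
 * detected by i reaching zero. For a 512-entry ring starting the clean at
 * slot 510, i begins at -2; two increments later i == 0, the unlikely(!i)
 * branch rewinds i to -512 and resets tx_buf/tx_desc to the ring base.
 * The numbers are illustrative only.
 */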
/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;
	struct i40e_vsi *vsi;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

	vsi = q_vector->tx.ring->vsi;
	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);

	return IRQ_HANDLED;
}
/**
 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/
static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}
/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}
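
/*
 * Distribution worked example: with 10 queue pairs and 4 vectors, the
 * DIV_ROUND_UP() above assigns 3, 3, 2 and 2 ring pairs to the vectors in
 * turn (ceil(10/4), ceil(7/3), ceil(4/2), ceil(2/1)), so no vector ever
 * carries more than one pair beyond any other.
 */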
/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif
/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 tx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
/**
 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

		/* warn the TX unit of coming changes */
		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
		if (!enable)
			usleep_range(10, 20);

		for (j = 0; j < 50; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}
		/* Skip if the queue is already in the requested state */
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
		/* No waiting for the Tx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_txq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	if (hw->revision_id == 0)
		mdelay(50);
	return ret;
}
/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
/**
 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
		/* No waiting for the Rx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}
/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: start or stop the rings
 **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	if (request) {
		ret = i40e_vsi_control_rx(vsi, request);
		if (ret)
			return ret;
		ret = i40e_vsi_control_tx(vsi, request);
	} else {
		/* Ignore return value, we need to shutdown whatever we can */
		i40e_vsi_control_tx(vsi, request);
		i40e_vsi_control_rx(vsi, request);
	}

	return ret;
}
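
/*
 * Ordering rationale, by example: on enable the Rx rings come up before
 * Tx so receive buffers are postable before the first transmit completes;
 * a typical caller (e.g. the VSI up path) would do, illustratively:
 *
 *	ret = i40e_vsi_control_rings(vsi, true);	// start
 *	...
 *	i40e_vsi_control_rings(vsi, false);		// stop, best effort
 *
 * The disable path intentionally ignores intermediate errors so that as
 * much of the VSI as possible is shut down.
 */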
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			synchronize_irq(pf->msix_entries[vector].vector);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);

				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
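/* Summary of the teardown above (illustrative, derived only from the code):
 * each MSI-X vector owns a hardware linked list of queue pairs rooted at
 * PFINT_LNKLSTN (PFINT_LNKLST0 for MSI/legacy).  The walk first reads the
 * head queue index, writes I40E_QUEUE_END_OF_LIST into the FIRSTQ field to
 * detach the list, then follows each Tx queue's NEXTQ field while clearing
 * the MSIX index, cause-enable and event bits in QINT_RQCTL/QINT_TQCTL, so
 * no queue can raise the just-freed interrupt afterwards.
 */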
/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	kfree_rcu(q_vector, rcu);
}
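/* Why kfree_rcu() above (illustrative note): a napi poller on another CPU
 * may still be dereferencing this q_vector under RCU; kfree_rcu() defers
 * the actual free until after a grace period, where a plain kfree() could
 * release memory that the poll routine is still reading.
 */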
/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/
static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
	int v_idx;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		i40e_free_q_vector(vsi, v_idx);
}
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_stop_misc_vector(pf);
	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
	}

	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}
/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	bool reset = false;

	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		reset = true;
	i40e_notify_client_of_netdev_close(vsi, reset);
}
/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	/* No need to disable FCoE VSI when Tx suspended */
	if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
	    vsi->type == I40E_VSI_FCOE) {
		dev_dbg(&vsi->back->pdev->dev,
			"VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
		return;
	}

	set_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}
/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
		return;

	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}
/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}
/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * This function waits for the given VSI's queues to be disabled.
 **/
static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the disable status of the queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the disable status of the queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}
/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		/* No need to wait for FCoE VSI queues */
		if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}

#endif
/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks the specified queue for the given VSI and detects a
 * hung condition.  Setting the hung bit is a two-step process: before the
 * next run of the service task, if napi_poll runs it resets the 'hung' bit
 * for the respective q_vector; if not, the hung condition remains unchanged
 * and during the subsequent run this function issues a SW interrupt to
 * recover from it.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf	*pf;
	u32 head, val, tx_pending_hw;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	head = i40e_get_head(tx_ring);

	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);

	/* HW is done executing descriptors, updated HEAD write back,
	 * but SW hasn't processed those descriptors. If interrupt is
	 * not generated from this point ON, it could result into
	 * dev_watchdog detecting timeout on those netdev_queue,
	 * hence proactively trigger SW interrupt.
	 */
	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
		/* NAPI Poll didn't run and clear since it was set */
		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
				       &tx_ring->q_vector->hung_detected)) {
			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
				    vsi->seid, q_idx, tx_pending_hw,
				    tx_ring->next_to_clean, head,
				    tx_ring->next_to_use,
				    readl(tx_ring->tail));
			netdev_info(vsi->netdev, "VSI_seid %d, Issuing force_wb for TX queue %d, Interrupt Reg: 0x%x\n",
				    vsi->seid, q_idx, val);
			i40e_force_wb(vsi, tx_ring->q_vector);
		} else {
			/* First Chance - detected possible hung */
			set_bit(I40E_Q_VECTOR_HUNG_DETECT,
				&tx_ring->q_vector->hung_detected);
		}
	}

	/* This is the case where we have interrupts missing,
	 * so the tx_pending in HW will most likely be 0, but we
	 * will have tx_pending in SW since the WB happened but the
	 * interrupt got lost.
	 */
	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
		if (napi_reschedule(&tx_ring->q_vector->napi))
			tx_ring->tx_stats.tx_lost_interrupt++;
	}
}
/**
 * i40e_detect_recover_hung - Function to detect and recover hung_queues
 * @pf:  pointer to PF struct
 *
 * LAN VSI has netdev and netdev has TX queues. This function is to check
 * each of those TX queues if they are hung, trigger recovery by issuing
 * SW interrupt.
 **/
static void i40e_detect_recover_hung(struct i40e_pf *pf)
{
	struct net_device *netdev;
	struct i40e_vsi *vsi;
	int i;

	/* Only for LAN VSI */
	vsi = pf->vsi[pf->lan_vsi];

	if (!vsi)
		return;

	/* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
	if (test_bit(__I40E_DOWN, &vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
		return;

	/* Make sure type is MAIN VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	netdev = vsi->netdev;
	if (!netdev)
		return;

	/* Bail out if netif_carrier is not OK */
	if (!netif_carrier_ok(netdev))
		return;

	/* Go through TX queues for netdev */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;

		q = netdev_get_tx_queue(netdev, i);
		if (q)
			i40e_detect_recover_hung_queue(i, vsi);
	}
}
/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for ISCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}
/**
 * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	int i, tc_unused = 0;
	u8 num_tc = 0;
	u8 ret = 0;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and create a bitmask of enabled TCs
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);

	/* Now scan the bitmask to check for
	 * contiguous TCs starting with TC0
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TC - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = 1;
		}
	}

	/* There is always at least TC0 */
	if (!ret)
		ret = 1;

	return ret;
}
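/* Worked example (illustrative): a priority table of { 0, 0, 1, 1, 0, 0,
 * 0, 0 } makes the first loop build num_tc = BIT(0) | BIT(1) = 0x3, and the
 * second loop counts two contiguous TCs, so 2 is returned.  A table such as
 * { 0, 2, ... } sets BIT(2) without BIT(1) and trips the non-contiguous
 * case, forcing the single-TC fallback.
 */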
/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the number of
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}
/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	/* If DCB is not enabled then always in single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}
/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	/* If DCB is not enabled for this PF then just return default TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);

	return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
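/* Layout note (illustrative): bw_ets_config.tc_bw_max[] packs eight 4-bit
 * max-quanta fields into two little-endian 16-bit words.  The loop above
 * concatenates them into one u32 and keeps bits (i*4)..(i*4+2) per TC;
 * e.g. for TC2 a combined value of 0x00000500 yields (0x500 >> 8) & 0x7 = 5.
 */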
/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
/**
 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
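/* Typical call sequence (illustrative, mirroring i40e_setup_tc() below):
 *
 *	i40e_quiesce_vsi(vsi);
 *	ret = i40e_vsi_config_tc(vsi, enabled_tc);
 *	i40e_unquiesce_vsi(vsi);
 *
 * Calling this with active queues violates the NOTE in the kernel-doc above.
 */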
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would have quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
#ifdef I40E_FCOE
		 * - For FCoE VSI only enable the TC configured
		 *   as per the APP TLV
#endif
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
	if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 */
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	char *speed = "Unknown";
	char *fc = "Unknown";

	if (vsi->current_isup == isup)
		return;
	vsi->current_isup = isup;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (vsi->back->hw.func_caps.npar_enable &&
	    (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (vsi->back->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (vsi->back->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
		    speed, fc);
}
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here*/
		if ((pf->hw.phy.link_info.link_info &
			I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
			I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		if (pf->fd_tcp_rule > 0) {
			pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_control_rings(vsi, false);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}

	i40e_notify_client_of_netdev_close(vsi, false);
}
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @tc: number of traffic classes to enable
 **/
static int i40e_setup_tc(struct net_device *netdev, u8 tc)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0;
	int ret = -EINVAL;
	int i;

	/* Check if DCB enabled to continue */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		netdev_info(netdev, "DCB is not enabled for adapter\n");
		goto exit;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
		goto exit;
	}

	/* Check whether tc count is within enabled limit */
	if (tc > i40e_pf_get_num_tc(pf)) {
		netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
		goto exit;
	}

	/* Generate TC map for number of tc requested */
	for (i = 0; i < tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		goto exit;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);

exit:
	return ret;
}
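/* From user space this path is reached through the mqprio qdisc.  On a
 * DCB-enabled port, something like the following (hypothetical device name,
 * exact options depend on the iproute2 version) requests four traffic
 * classes:
 *
 *	tc qdisc add dev eth0 root mqprio num_tc 4 \
 *		map 0 1 2 3 0 1 2 3 queues 4@0 4@4 4@8 4@12
 */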
#ifdef I40E_FCOE
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
		    struct tc_to_netdev *tc)
#else
static int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
			   struct tc_to_netdev *tc)
#endif
{
	if (handle != TC_H_ROOT || tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return i40e_setup_tc(netdev, tc->tc);
}
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	udp_tunnel_get_rx_info(netdev);

	return 0;
}
/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);

	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return err;
}
/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}
	pf->fdir_pf_active_filters = 0;
}
/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
				set_bit(__I40E_DOWN, &vsi->state);
				i40e_down(vsi);
				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
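/* Reset ordering summary (illustrative): the branches above run from the
 * most to the least disruptive request, so only the biggest pending reset
 * is performed:
 *
 *	GLOBR       - whole chip, all PFs, including MAC/PHY
 *	CORER       - like GLOBR but without the MAC/PHY
 *	PFR         - this PF only, handled via i40e_handle_reset_warning()
 *	REINIT/DOWN - per-VSI rebuild or shutdown, no chip reset
 */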
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		/* Notify the client for the DCB changes */
		i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
	}

exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VF queues
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			    >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	WARN_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}
/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}
/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}
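/* Register note (illustrative): I40E_PFQF_FDSTAT and I40E_GLQF_FDCNT_0 each
 * pack two counters, the guaranteed-filter count in the low field and the
 * best-effort count in a higher field.  The helpers above mask and shift
 * the fields before adding them; e.g. GUARANT_CNT = 5 and BESTCNT = 12
 * give fcnt_prog = 17.
 */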
/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		return;

	/* Check if, FD SB or ATR was auto disabled and if there is enough room
	 * to re-enable
	 */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}

	/* Wait for some more space to be available to turn on ATR. We also
	 * must check that no existing ntuple rules for TCP are in effect
	 */
	if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->fd_tcp_rule == 0)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}
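/* Threshold note (illustrative): sideband (ntuple) filters come back once
 * the programmed count drops below (capacity - I40E_FDIR_BUFFER_HEAD_ROOM),
 * while ATR waits for twice that headroom and for the last TCP ntuple rule
 * to be removed, since ATR and sideband TCP/IPv4 rules conflict.
 */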
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr)
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}
/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first filter
 * miss error on Rx queue 0. Accumulating enough error messages before
 * reacting will make sure we don't cause a flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}
/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
#endif
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}
/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* save off old link status information */
	pf->hw.phy.link_info_old = pf->hw.phy.link_info;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	status = i40e_get_link_status(&pf->hw, &new_link);
	if (status) {
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_DOWN, &vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	if (!test_bit(__I40E_DOWN, &vsi->state))
		i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
}

/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	rtnl_lock();
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		goto unlock;
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);

unlock:
	rtnl_unlock();
}
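
/* Illustration (not driver code): each requested reset type accumulates as
 * its own bit in reset_flags, so several requests collapse into a single
 * i40e_do_reset() call. E.g. if both a PF reset and a core reset were
 * pending, reset_flags == BIT(__I40E_PF_RESET_REQUESTED) |
 * BIT(__I40E_CORE_RESET_REQUESTED); our understanding (the callee lives
 * elsewhere in this file) is that i40e_do_reset() then services the
 * widest-scope reset implied by those bits.
 */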

/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
	 */
	i40e_link_event(pf);

	/* check for unqualified module, if link is down */
	if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
	    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
	    (!(status->link_info & I40E_AQ_LINK_UP)))
		dev_err(&pf->pdev->dev,
			"The driver failed to link because an unqualified module was detected.\n");
}

/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, &pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (pending && (i++ < pf->adminq_work_limit));

	clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	kfree(event.msg_buf);
}

/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, &pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, &pf->state);
	}
}

/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}

/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}

/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}

/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}

/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 **/
static int i40e_get_capabilities(struct i40e_pf *pf)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
					    &data_size,
					    i40e_aqc_opc_list_func_capabilities,
					    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER)
		dev_info(&pf->pdev->dev,
			 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
			 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
			 pf->hw.func_caps.num_msix_vectors,
			 pf->hw.func_caps.num_msix_vectors_vf,
			 pf->hw.func_caps.fd_filters_guaranteed,
			 pf->hw.func_caps.fd_filters_best_effort,
			 pf->hw.func_caps.num_tx_qp,
			 pf->hw.func_caps.num_vsis);

#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
	if (pf->hw.revision_id == 0 &&
	    (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
		dev_info(&pf->pdev->dev,
			 "got num_vsis %d, setting num_vsis to %d\n",
			 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
		pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
	}

	return 0;
}
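
/* Illustration of the retry loop above: the first attempt posts a buffer
 * sized for 40 capability elements. If firmware has more to report, it
 * fails the command with I40E_AQ_RC_ENOMEM and returns the size it
 * actually needs in data_size; the loop then re-issues the command with
 * buf_len = data_size, so the second pass is sized exactly.
 */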

static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;
	int i;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = NULL;
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			vsi = pf->vsi[i];
			break;
		}
	}

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}

/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	int i;

	i40e_fdir_filter_exit(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_release(pf->vsi[i]);
			break;
		}
	}
}

/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	i40e_pf_quiesce_all_vsi(pf);

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}

/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}

/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	u32 v;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, &pf->state);
		goto clear_recovery;
	}
	pf->pfr_count++;

	if (test_bit(__I40E_DOWN, &pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif
	/* do basic switch setup */
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_core_reset;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					pf->vsi[pf->lan_vsi]->uplink_seid
								= pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_core_reset;
		}
	}

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	if (pf->num_alloc_vfs) {
		for (v = 0; v < pf->num_alloc_vfs; v++)
			i40e_reset_vf(&pf->vf[v], true);
	}

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

end_core_reset:
	clear_bit(__I40E_RESET_FAILED, &pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
}
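
/* Worked example for the MSS clamp in i40e_reset_and_rebuild() above:
 * I40E_REG_MSS_MIN_MASK (0x3FF0000) covers bits 16-25 of the register, and
 * I40E_64BYTE_MSS is 0x400000 == 0x40 << 16, i.e. a field value of 64.
 * So a register whose minimum-MSS field decodes to more than 64 is
 * rewritten to exactly 64, allowing small-MSS TSO without tripping a
 * Malicious Driver Detection reset. (The byte-unit interpretation of the
 * field is our reading of the constants, not taken from a datasheet.)
 */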

/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf)
{
	i40e_prep_for_reset(pf);
	i40e_reset_and_rebuild(pf, false);
}

/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
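
/* Illustration of the queue normalization above (numbers assumed): the
 * MDET registers report an absolute queue number, while the driver works
 * with queues relative to this function's allocation. With
 * func_caps.base_queue = 64 and a raw queue field of 67, the reported
 * queue is 67 - 64 = 3, i.e. the PF's fourth queue.
 */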

/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			pf->pending_udp_bitmap &= ~BIT_ULL(i);
			port = pf->udp_ports[i].index;
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
						     pf->udp_ports[i].type,
						     NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_dbg(&pf->pdev->dev,
					"%s %s port %d, index %d failed, err %s aq_err %s\n",
					pf->udp_ports[i].type ? "vxlan" : "geneve",
					port ? "add" : "delete",
					port, i,
					i40e_stat_str(&pf->hw, ret),
					i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				pf->udp_ports[i].index = 0;
			}
		}
	}
}

/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		i40e_service_event_complete(pf);
		return;
	}

	i40e_detect_recover_hung(pf);
	i40e_sync_filters_subtask(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_client_subtask(pf);
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}

/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
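
/* Usage note (setup happens elsewhere in this file, so this is an
 * assumption about the call site): the timer is expected to be armed at
 * probe time with something like
 *	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
 *	mod_timer(&pf->service_timer, jiffies + pf->service_timer_period);
 * so each expiry both re-arms the timer and kicks the service task.
 */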

/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}

/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: pointer to the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	int size;
	int ret = 0;

	/* allocate memory for both Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
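
/* Memory layout produced by i40e_vsi_alloc_arrays() (illustration): one
 * kzalloc() backs both pointer arrays, with rx_rings aliasing the second
 * half. For alloc_queue_pairs = 4:
 *
 *	tx_rings:  [0][1][2][3]
 *	rx_rings:              [0][1][2][3]   (== &tx_rings[4])
 *
 * so a single kfree(vsi->tx_rings) releases both arrays.
 */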

/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_list_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
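
/* Illustration of the slot scan above (numbers assumed): with
 * num_alloc_vsi = 8, next_vsi = 5 and slots 5..7 occupied, the first loop
 * runs off the end, then the wrap-around loop scans 0..4 and picks the
 * first free slot there. next_vsi is only a search hint; correctness does
 * not depend on it being accurate.
 */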

/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: pointer to the VSI being cleaned
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}

/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 **/
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}

/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}

/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
		}
	}
}

/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
		if (!tx_ring)
			goto err_out;

		tx_ring->queue_index = i;
		tx_ring->reg_idx = vsi->base_queue + i;
		tx_ring->ring_active = false;
		tx_ring->vsi = vsi;
		tx_ring->netdev = vsi->netdev;
		tx_ring->dev = &pf->pdev->dev;
		tx_ring->count = vsi->num_desc;
		tx_ring->size = 0;
		tx_ring->dcb_tc = 0;
		if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
			tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		tx_ring->tx_itr_setting = pf->tx_itr_default;
		vsi->tx_rings[i] = tx_ring;

		rx_ring = &tx_ring[1];
		rx_ring->queue_index = i;
		rx_ring->reg_idx = vsi->base_queue + i;
		rx_ring->ring_active = false;
		rx_ring->vsi = vsi;
		rx_ring->netdev = vsi->netdev;
		rx_ring->dev = &pf->pdev->dev;
		rx_ring->count = vsi->num_desc;
		rx_ring->size = 0;
		rx_ring->dcb_tc = 0;
		rx_ring->rx_itr_setting = pf->rx_itr_default;
		vsi->rx_rings[i] = rx_ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
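
/* Note on the paired allocation above: each kzalloc() grabs two struct
 * i40e_ring back to back, with rx_ring = &tx_ring[1] pointing at the
 * second one. This is why i40e_vsi_clear_rings() frees only
 * vsi->tx_rings[i] (via kfree_rcu) and merely NULLs vsi->rx_rings[i]:
 * the Rx ring shares the Tx ring's allocation.
 */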

/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}
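
/* Semantics of pci_enable_msix_range() as used above: it allocates between
 * I40E_MIN_MSIX and 'vectors' entries and returns the number actually
 * granted, or a negative errno if even the minimum could not be met. So a
 * return of 0 from this wrapper always means "no MSI-X", and callers
 * compare the positive return against what they budgeted.
 */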

/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
#ifdef I40E_FCOE
	 *   - The number of FCOE qps.
#endif
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve vectors for the main PF traffic queues */
	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
	vectors_left -= pf->num_lan_msix;
	v_budget += pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

#ifdef I40E_FCOE
	/* can we reserve enough for FCoE? */
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (!vectors_left)
			pf->num_fcoe_msix = 0;
		else if (vectors_left >= pf->num_fcoe_qps)
			pf->num_fcoe_msix = pf->num_fcoe_qps;
		else
			pf->num_fcoe_msix = 1;
		v_budget += pf->num_fcoe_msix;
		vectors_left -= pf->num_fcoe_msix;
	}

#endif
	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq.  If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vmdq_vecs < vmdq_vecs_wanted)
				pf->num_vmdq_qps = 1;
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (!vectors_left) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached, attempting to redistribute vectors\n");
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;
#ifdef I40E_FCOE
		pf->num_fcoe_qps = 0;
		pf->num_fcoe_msix = 0;
#endif

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_fcoe_msix = 1;
			}
#endif
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
						  iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_fcoe_msix = 1;
				vec--;
			}
#endif
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
#ifdef I40E_FCOE

	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	}
#endif
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
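
/* Worked example of the budget above (numbers assumed, not from hardware):
 * with func_caps.num_msix_vectors = 16, 8 online CPUs, FD-SB enabled and
 * two VMDq VSIs of 2 queues each:
 *	misc	1	(vectors_left 16 -> 15)
 *	LAN	8	(15 -> 7)
 *	FD-SB	1	(7 -> 6)
 *	VMDq	4	(6 -> 2)
 * giving v_budget = 14; anything the kernel grants short of that falls
 * into the redistribution paths handled after i40e_reserve_msix_vectors().
 */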

/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 * @cpu: cpu to be used on affinity_mask
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_set_cpu(cpu, &q_vector->affinity_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}

/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors, current_cpu;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	current_cpu = cpumask_first(cpu_online_mask);

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
		if (err)
			goto err_out;
		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
		if (unlikely(current_cpu >= nr_cpu_ids))
			current_cpu = cpumask_first(cpu_online_mask);
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}

/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED	|
#endif
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf, true);

	return err;
}

/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}

/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_pf *pf = vsi->back;
	u8 *lut;
	int ret;

	if (!(pf->flags & I40E_FLAG_RSS_AQ_CAPABLE))
		return 0;

	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;
	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
						  seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw,
						  I40E_VFQF_HKEY1(i, vf_id),
						  seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				i40e_write_rx_ctl(hw,
						  I40E_VFQF_HLUT1(i, vf_id),
						  lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}

/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 **/
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
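
/* Worked example: with rss_table_size = 8 and rss_size = 3,
 * i40e_fill_rss_lut() produces lut = { 0, 1, 2, 0, 1, 2, 0, 1 }, spreading
 * hash buckets round-robin across the first three queues.
 */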

/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
		 pf->alloc_rss_size, pf->rss_size_max);
	return pf->alloc_rss_size;
}

/**
 * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
{
	i40e_status status;
	bool min_valid, max_valid;
	u32 max_bw, min_bw;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->npar_min_bw = min_bw;
		if (max_valid)
			pf->npar_max_bw = max_bw;
	}

	return status;
}
/**
 * i40e_set_npar_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	i40e_status status;

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}
/**
 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
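
/* The sequence above is intentionally read-then-write: word 0x10 is read
 * back and rewritten unchanged, because the update-NVM operation itself
 * (not the data written) is what flushes the shadow (alt) RAM, where
 * i40e_set_npar_bw_setting() placed the values, into permanent NVM.
 */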
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_npar_bw_setting(pf))
			dev_warn(&pf->pdev->dev,
				 "Could not get NPAR bw settings\n");
		else
			dev_info(&pf->pdev->dev,
				 "Min BW = %8.8x, Max BW = %8.8x\n",
				 pf->npar_min_bw, pf->npar_max_bw);
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (i40e_is_mac_710(&pf->hw) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	     (pf->hw.aq.fw_maj_ver < 4))) {
		pf->flags |= I40E_FLAG_RESTART_AUTONEG;
		/* No DCB support for FW < v4.33 */
		pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
	}

	/* Disable FW LLDP if FW < v4.3 */
	if (i40e_is_mac_710(&pf->hw) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	     (pf->hw.aq.fw_maj_ver < 4)))
		pf->flags |= I40E_FLAG_STOP_FW_LLDP;

	/* Use the FW Set LLDP MIB API if FW > v4.40 */
	if (i40e_is_mac_710(&pf->hw) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
	     (pf->hw.aq.fw_maj_ver >= 5)))
		pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;

	if (pf->hw.func_caps.vmdq) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

	if (pf->hw.func_caps.iwarp) {
		pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP just like MISC.*/
		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
	}

#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
			     I40E_FLAG_128_QP_RSS_CAPABLE |
			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
			     I40E_FLAG_WB_ON_ITR_CAPABLE |
			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
			     I40E_FLAG_NO_PCI_LINK_CHECK |
			     I40E_FLAG_USE_SET_LLDP_MIB |
			     I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
	} else if ((pf->hw.aq.api_maj_ver > 1) ||
		   ((pf->hw.aq.api_maj_ver == 1) &&
		    (pf->hw.aq.api_min_ver > 4))) {
		/* Supported in FW API version higher than 1.4 */
		pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
	} else {
		pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
	}

	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

	/* If NPAR is enabled nudge the Tx scheduler */
	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
		i40e_set_npar_bw_setting(pf);

sw_init_done:
	return err;
}
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		/* enable FD_SB only if there is MSI-X vector */
		if (pf->num_fdsb_msix > 0)
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
		pf->fdir_pf_active_filters = 0;
		/* if ATR was auto disabled it can be re-enabled. */
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		}
	}
	return need_reset;
}
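
/* Usage note: this is reached via ndo_set_features when user space toggles
 * the feature, typically with "ethtool -K <dev> ntuple on|off". Turning
 * ntuple off tears down all sideband filters, so a later "on" starts from
 * an empty filter list.
 */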
/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/
static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	if (vsi->type == I40E_VSI_MAIN) {
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			wr32(hw, I40E_PFQF_HLUT(i), 0);
	} else if (vsi->type == I40E_VSI_SRIOV) {
		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
	} else {
		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
	}
}
/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
		i40e_pf_config_rss(pf);
	else if (!(features & NETIF_F_RXHASH) &&
		 netdev->features & NETIF_F_RXHASH)
		i40e_clear_rss_lut(vsi);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return 0;
}
/**
 * i40e_get_udp_port_idx - Lookup a possibly offloaded Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].index == port)
			return i;
	}

	return i;
}
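
/* Looking up port 0 doubles as a free-slot search: unused entries in
 * pf->udp_ports[] keep .index == 0, so i40e_get_udp_port_idx(pf, 0)
 * returns the first available slot (used by i40e_udp_tunnel_add() below).
 */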
/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
static void i40e_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	__be16 port = ti->port;
	u8 next_idx;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "port %d already offloaded\n",
			    ntohs(port));
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
			    ntohs(port));
		return;
	}

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
			return;
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
		break;
	default:
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->udp_ports[next_idx].index = port;
	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
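
/* The add only records intent: for example, offloading VXLAN port 4789
 * stores htons(4789) in udp_ports[next_idx].index and sets bit next_idx in
 * pending_udp_bitmap. The admin queue commands that actually program the
 * hardware are issued later from the service task's UDP filter sync,
 * triggered by I40E_FLAG_UDP_FILTER_SYNC.
 */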
/**
 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
static void i40e_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	__be16 port = ti->port;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
		goto not_found;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
			goto not_found;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
			goto not_found;
		break;
	default:
		goto not_found;
	}

	/* if port exists, set it to 0 (mark for deletion)
	 * and make it pending
	 */
	pf->udp_ports[idx].index = 0;
	pf->pending_udp_bitmap |= BIT_ULL(idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;

	return;
not_found:
	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
		    ntohs(port));
}
static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}
/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @flags: instructions from stack about fdb operation
 **/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
			break;
		}
	}

	return 0;
}
/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process id
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       0, 0, nlflags, filter_mask, NULL);
}
/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
 * inner mac plus all inner ethertypes.
 */
#define I40E_MAX_TUNNEL_HDR_LEN 128

/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	if (skb->encapsulation &&
	    ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
	     I40E_MAX_TUNNEL_HDR_LEN))
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}
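
/* Worked example: for a VXLAN packet the span measured above runs from the
 * outer UDP header (transport header) to the inner IP header (inner network
 * header), i.e. outer UDP + VXLAN + inner Ethernet. If that distance
 * exceeds the 128-byte limit, checksum and GSO offloads are cleared and the
 * stack falls back to software for this skb.
 */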
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
#ifdef I40E_FCOE
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
#endif
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
};
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	netdev->hw_enc_features |= NETIF_F_SG			|
				   NETIF_F_IP_CSUM		|
				   NETIF_F_IPV6_CSUM		|
				   NETIF_F_HIGHDMA		|
				   NETIF_F_SOFT_FEATURES	|
				   NETIF_F_TSO			|
				   NETIF_F_TSO_ECN		|
				   NETIF_F_TSO6			|
				   NETIF_F_GSO_GRE		|
				   NETIF_F_GSO_GRE_CSUM		|
				   NETIF_F_GSO_IPXIP4		|
				   NETIF_F_GSO_IPXIP6		|
				   NETIF_F_GSO_UDP_TUNNEL	|
				   NETIF_F_GSO_UDP_TUNNEL_CSUM	|
				   NETIF_F_GSO_PARTIAL		|
				   NETIF_F_SCTP_CRC		|
				   NETIF_F_RXHASH		|
				   NETIF_F_RXCSUM		|
				   0;

	if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	/* record features VLANs can make use of */
	netdev->vlan_features |= netdev->hw_enc_features |
				 NETIF_F_TSO_MANGLEID;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->hw_features |= NETIF_F_NTUPLE;

	netdev->hw_features |= netdev->hw_enc_features	|
			       NETIF_F_HW_VLAN_CTAG_TX	|
			       NETIF_F_HW_VLAN_CTAG_RX;

	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet,
		 * which must be replaced by a normal filter.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}
/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 *
 * Returns 0 on success, negative value on failure
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}
/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
		return 0;
	} else {
		/* Uplink is a bridge in VEB mode */
		return 1;
	}

	/* VEPA is now default bridge, so return 0 */
	return 0;
}
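
/* Note the bias here: a VSI with no bridge uplink at all reports VEB mode
 * (1), while only an explicit VEPA-mode bridge reports 0. Callers such as
 * i40e_add_vsi() below use this to decide whether to set the ALLOW_LB
 * switch flag in the VSI context.
 */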
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	i40e_status aq_ret = 0;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f, *ftmp;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
			ctxt.info.queueing_opt_flags |=
				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
		if (ret) {
			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
			return ret;
		}
		break;

#endif /* I40E_FCOE */
	case I40E_VSI_IWARP:
		/* send down message to iWARP */
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}
	/* Except FDIR VSI, for all other VSIs set the broadcast filter */
	if (vsi->type != I40E_VSI_FDIR) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
		if (aq_ret) {
			ret = i40e_aq_rc_to_posix(aq_ret,
						  hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set brdcast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	vsi->active_filters = 0;
	clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
	spin_lock_bh(&vsi->mac_filter_list_lock);
	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->state = I40E_FILTER_NEW;
		f_count++;
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	spin_lock_bh(&vsi->mac_filter_list_lock);
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet. We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}
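
/* Vector accounting sketch: in MSI-X mode each q_vector consumes one entry
 * in pf->irq_pile, and base_vector is the first of num_q_vectors
 * consecutive entries. In legacy/MSI mode the early return above skips the
 * allocation because queue interrupts piggyback on the misc/ICR0 vector.
 */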
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous case
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
			    vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
		/* Apply relevant filters if a platform-specific mac
		 * address was selected.
		 */
		if (!!(pf->flags & I40E_FLAG_PF_MAC)) {
			ret = i40e_macaddr_init(vsi, pf->hw.mac.addr);
			if (ret) {
				dev_warn(&pf->pdev->dev,
					 "could not set up macaddr; err %d\n",
					 ret);
			}
		}
	case I40E_VSI_VMDQ2:

		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
	}

out:
	return ret;
}
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening. We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB. It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_update_link_info(&pf->hw);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	return ret;
}
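
/* Note on the switch-config step above: the promiscuous-behaviour bit is a
 * device-global setting, which is why only PF 0 (with valid_flags naming
 * just that bit) programs it. Setting the bit selects the default
 * "limited" promiscuous behaviour; it is left clear only when the user has
 * asked for I40E_FLAG_TRUE_PROMISC_SUPPORT. An I40E_AQ_RC_ESRCH reply is
 * tolerated on purpose -- it simply means the firmware predates the
 * command.
 */
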
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;

	pf->num_lan_qps = 0;
#ifdef I40E_FCOE
	pf->num_fcoe_qps = 0;
#endif

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big RSS set.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED	|
#endif
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
#ifdef I40E_FCOE
			       I40E_FLAG_FCOE_ENABLED	|
#endif
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}
		pf->num_lan_qps = max_t(int, pf->rss_size_max,
					num_online_cpus());
		pf->num_lan_qps = min_t(int, pf->num_lan_qps,
					pf->hw.func_caps.num_tx_qp);

		queues_left -= pf->num_lan_qps;
	}

#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (I40E_DEFAULT_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
		} else if (I40E_MINIMUM_FCOE <= queues_left) {
			pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
		} else {
			pf->num_fcoe_qps = 0;
			pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
		}

		queues_left -= pf->num_fcoe_qps;
	}

#endif
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
#ifdef I40E_FCOE
	dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
#endif
}
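
/* Worked example of the distribution above, with hypothetical numbers:
 * num_tx_qp = 64, rss_size_max = 8, 16 online CPUs, FD_SB enabled, 2
 * requested VFs at num_vf_qps = 4, and 8 VMDq VSIs at num_vmdq_qps = 2:
 *
 *	lan_qps = min(max(8, 16), 64) = 16	-> queues_left = 48
 *	FD sideband reserves 1			-> queues_left = 47
 *	VFs take min(2, 47/4) * 4 = 8		-> queues_left = 39
 *	VMDq takes min(8, 39/2) * 2 = 16	-> queues_left = 23
 *
 * Whatever remains is parked in pf->queues_left for later consumers.
 */
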
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the PF.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
#ifdef I40E_FCOE
	if (pf->flags & I40E_FLAG_FCOE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FCOE");
#endif
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
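
/* Note on the string building above: snprintf() returns the length the
 * output *would* have had, so if any step truncates, the running index 'i'
 * can grow past INFO_STRING_LEN even though each write stays bounded by
 * REMAIN(i). The WARN_ON() flags exactly that case, since a later &buf[i]
 * would then point past the allocation.
 */
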
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 *
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address in Open Firmware on systems that support it,
 * and use IDPROM on SPARC if no OF address is found. On return, the
 * I40E_FLAG_PF_MAC will be set in pf->flags if a platform-specific value
 * has been selected.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	pf->flags &= ~I40E_FLAG_PF_MAC;
	if (!eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		pf->flags |= I40E_FLAG_PF_MAC;
}
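
/* eth_platform_get_mac_address() returns 0 only when it found and copied a
 * usable address (an Open Firmware property, or the IDPROM on SPARC) into
 * pf->hw.mac.addr, hence the negated test above.
 */
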
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, &pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	pf->instance = pfs_found;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	if (debug != -1) {
		pf->msg_enable = pf->hw.debug_mask;
		pf->msg_enable = debug;
	}

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}
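
	/* Note on the CORER above: on revision-0 parts the PXE-mode
	 * indication can still be latched from power-on, and a one-time
	 * core reset (CORER) is used to clear it before the regular PF
	 * reset below; the flush and msleep() give the reset time to
	 * settle before any further MMIO access.
	 */
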
	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	i40e_get_mac_addr(hw, hw->mac.addr);
	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);
	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->flags |= I40E_FLAG_PORT_ID_VALID;
#ifdef I40E_FCOE
	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
	if (err)
		dev_info(&pdev->dev,
			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
	if (!is_valid_ether_addr(hw->mac.san_addr)) {
		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
			 hw->mac.san_addr);
		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
	}
	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
#endif /* I40E_FCOE */

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);
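
	/* WoL policy note: in the NVM's wake-on-LAN word a *set* bit
	 * disables WoL for that port (note the inverted sense), and wake-up
	 * is only offered on partition 1 of a partitioned device, so every
	 * other partition forces wol_en off before reporting our wakeup
	 * capability to the PM core.
	 */
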
	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}
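
	/* MDD note: the Malicious Driver Detection logic treats TSO with a
	 * very small MSS as suspect and fires (forcing a reset), so the
	 * minimum segment size the hardware will accept is clamped down to
	 * 64 bytes here rather than letting legitimate small-MSS TSO trip
	 * the detector.
	 */
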
	if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, &pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	err = i40e_lan_add_device(pf);
	if (err)
		dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
			 err);

#ifdef I40E_FCOE
	/* create FCoE interface */
	i40e_fcoe_vsi_setup(pf);

#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;

	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, &pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
	(void)i40e_shutdown_adminq(hw);
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
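
/* Unwind note: the error labels above are ordered so that a goto from any
 * failure point falls through the release step for every resource acquired
 * *before* it and none acquired after -- teardown in exactly the reverse
 * order of setup. New setup steps therefore need their label added in the
 * matching slot.
 */
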
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	if (pf->service_timer.data)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* remove attached clients */
	ret_code = i40e_lan_del_device(pf);
	if (ret_code)
		dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
			 ret_code);

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
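
/* Teardown-order note for i40e_remove() above: tasks are quiesced first so
 * nothing can reschedule work against the hardware, switch elements and
 * VSIs are released while the admin queue is still alive (their removal
 * requires AQ commands), and only then are the HMC, admin queue, locks and
 * host memory torn down -- roughly the reverse of i40e_probe().
 */
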
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
		rtnl_lock();
		i40e_prep_for_reset(pf);
		rtnl_unlock();
	}

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	int err;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}
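
/* Recovery-probe note: reading I40E_GLGEN_RTRIG after the slot reset is a
 * cheap sanity check that the BARs are mapped and the device answers MMIO
 * reads -- a healthy part returns 0 here (no reset bits pending), and
 * anything else (including the all-ones pattern from a dead link) is
 * treated as unrecoverable.
 */
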
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, &pf->state))
		return;

	rtnl_lock();
	i40e_handle_reset_warning(pf);
	rtnl_unlock();
}
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
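
/* The prep-for-reset and WoL register writes in i40e_shutdown() run twice:
 * once while the service timer and task may still be live, and again after
 * both have been synchronously stopped, so the final quiesced state and
 * wake-up programming cannot be disturbed by a service task that was in
 * flight during the first pass.
 */
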
#ifdef CONFIG_PM
/**
 * i40e_suspend - PCI callback for moving to D3
 * @pdev: PCI device information struct
 * @state: PM state to transition to
 **/
static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	int retval = 0;

	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);

	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_stop_misc_vector(pf);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	pci_wake_from_d3(pdev, pf->wol_en);
	pci_set_power_state(pdev, PCI_D3hot);

	return retval;
}
/**
 * i40e_resume - PCI callback for waking up from D3
 * @pdev: PCI device information struct
 **/
static int i40e_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	/* pci_restore_state() clears dev->state_saved, so
	 * call pci_save_state() again to restore it.
	 */
	pci_save_state(pdev);

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	/* no wakeup events while running */
	pci_wake_from_d3(pdev, false);

	/* handling the reset will rebuild the device state */
	if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
		clear_bit(__I40E_DOWN, &pf->state);
		rtnl_lock();
		i40e_reset_and_rebuild(pf, false);
		rtnl_unlock();
	}

	return 0;
}

#endif /* CONFIG_PM */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};

static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
#ifdef CONFIG_PM
	.suspend  = i40e_suspend,
	.resume   = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* we will see if single thread per module is enough for now,
	 * it can't be any worse than using the system workqueue which
	 * was already single threaded
	 */
	i40e_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
				  i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);
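
/* Workqueue note for i40e_init_module() above: WQ_UNBOUND trades per-CPU
 * locality for not pinning the rare service work to one CPU,
 * WQ_MEM_RECLAIM guarantees a rescuer thread so reset work can make
 * progress under memory pressure, and max_active = 1 keeps the module
 * single-threaded as the in-line comment explains.
 */
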
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);