/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2017 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>

/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
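/* Illustrative note, not part of the original source: __stringify() pastes
 * the numeric macros above into string literals, so DRV_VERSION expands to
 * "2" "." "3" "." "2" "-k", i.e. the single string "2.3.2-k" that the
 * driver reports (e.g. via ethtool -i).
 */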
/* a bit of forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
				     struct i40e_cloud_filter *filter,
				     bool add);
static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
					     struct i40e_cloud_filter *filter,
					     bool add);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static struct workqueue_struct *i40e_wq;
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;

	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
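/* Illustrative sketch, not part of the original source: with an empty pile
 * of eight entries and owner ids 0x1 and 0x2, two calls
 *
 *	base_a = i40e_get_lump(pf, pile, 3, 0x1);	returns 0
 *	base_b = i40e_get_lump(pf, pile, 2, 0x2);	returns 3
 *
 * leave pile->list as { V|1, V|1, V|1, V|2, V|2, 0, 0, 0 } (V being
 * I40E_PILE_VALID_BIT) and pile->search_hint at 5, so the next request
 * starts scanning just past the last allocation instead of from index 0.
 */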
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: statistics structure to be populated
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = READ_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;
		i40e_get_netdev_stats_struct_tx(tx_ring, stats);

		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;

		if (i40e_enabled_xdp_vsi(vsi))
			i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}

	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
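/* Worked example, illustrative and not in the original source: suppose the
 * saved offset is 0xFFFFFFFFFFF0 and a later raw read returns 0x10.  The
 * 48-bit counter has wrapped, so the else branch computes
 *	(0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20
 * i.e. 32 events since the offset was taken, and the final mask keeps the
 * result within 48 bits.
 */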
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !(pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
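/* Illustrative scenario, not part of the original source: a VSI with no
 * PVID carries one MAC filter at VLAN=-1 (match any tag).  When the first
 * VLAN, say VLAN 5, becomes active, vlan_filters is nonzero, so the
 * VLAN=-1 filter is queued on tmp_del_list and re-added at VLAN=0; the
 * VSI then receives only untagged traffic plus traffic on VLAN 5.  When
 * the last VLAN is removed, the reverse swap restores the VLAN=-1 filter.
 */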
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the vlan
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		/* If we're in overflow promisc mode, set the state directly
		 * to failed, so we don't bother to try sending the filter
		 * to the hardware.
		 */
		if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
			f->state = I40E_FILTER_FAILED;
		else
			f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}
/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_VSI_DOWN, vsi->back->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, vsi->back->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode(). If we copy after changing the address in the filter
	 * list, we might open ourselves to a narrow race window where
	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
	 * from passing.
	 */
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(&vsi->back->hw,
						I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
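/* Illustrative note, not part of the original source: i40e_fill_rss_lut()
 * is assumed here to spread queue indices round-robin, so with
 * rss_size = 4 the default table reads 0, 1, 2, 3, 0, 1, 2, 3, ... for
 * all rss_table_size entries, distributing hash buckets evenly across
 * the four queues.
 */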
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
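/* Worked example, illustrative and not in the original source: with mqprio
 * requesting 3 queues for TC0, ilog2(3) = 1 and 3 is not a power of two,
 * so pow becomes 2 and the qmap field advertises a 2^2 = 4 queue region
 * starting at offset 0; the extra queue in the region is simply left
 * unused by the queue mapping.
 */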
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs;
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow use more TC queue pairs than MSI-X vectors exist */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
				    I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				/* fall through */
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				WARN_ON(i != 0);
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}
/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
	}
}
/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}
/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries needs to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}
/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 **/
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}
/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 **/
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}
/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that success does not force ret_val to
 * be set to 0. This ensures that a sequence of calls to this function
 * preserve the previous value of *retval on successful delete.
 **/
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}
/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 * @promisc_changed: set to true on exit if promiscuous mode was forced on
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * promisc_changed to true if the firmware has run out of space for more
 * filters.
 **/
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add, bool *promisc_changed)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		*promisc_changed = true;
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
			 vsi_name);
	}
}
/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure;
 **/
static i40e_status
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret)
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s setting broadcast promiscuous mode on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);

	return aq_ret;
}
/**
 * i40e_set_promiscuous - set promiscuous mode
 * @pf: board private structure
 * @promisc: promisc on or off
 *
 * There are different ways of setting promiscuous mode on a PF depending on
 * what state/environment we're in.  This identifies and sets it appropriately.
 * Returns 0 on success.
 **/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	if (vsi->type == I40E_VSI_MAIN &&
	    pf->lan_veb != I40E_NO_VEB &&
	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
		/* set defport ON for Main VSI instead of true promisc
		 * this way we will get all unicast/multicast and VLAN
		 * promisc behavior but will not get VF or VMDq traffic
		 * replicated on the Main VSI.
		 */
		if (promisc)
			aq_ret = i40e_aq_set_default_vsi(hw,
							 vsi->seid,
							 NULL);
		else
			aq_ret = i40e_aq_clear_default_vsi(hw,
							   vsi->seid,
							   NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "Set default VSI failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
						  hw,
						  vsi->seid,
						  promisc, NULL,
						  true);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set unicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
						  hw,
						  vsi->seid,
						  promisc, NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set multicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if (!aq_ret)
		pf->cur_promisc = promisc;

	return aq_ret;
}
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	bool promisc_changed = false;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	i40e_status aq_ret = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;

	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete. */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		retval = i40e_correct_mac_vlan_filters(vsi,
						       &tmp_add_list,
						       &tmp_del_list,
						       vlan_filters);
		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
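		/* Deletes are sent to firmware in chunks: the buffer below
		 * holds as many remove elements as fit in one AdminQ send
		 * buffer, and is flushed whenever it fills up.
		 */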
		filter_list_len = hw->aq.asq_buf_size /
			  sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			  sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag and release filter list.
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del)
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			     sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			     sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
				     vsi->state)) {
				new->state = I40E_FILTER_FAILED;
				continue;
			}

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter.
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add,
						     &promisc_changed);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add)
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add, &promisc_changed);

		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* If promiscuous mode has changed, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (promisc_changed)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
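	/* The 3/4 factor provides hysteresis: overflow promiscuous mode is
	 * only left once the filter count drops well below the level that
	 * forced it on, so we don't flap in and out of it.
	 */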
	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
	    !promisc_changed && !failed_filters &&
	    (vsi->active_filters < vsi->promisc_threshold)) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		promisc_changed = true;
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if ((changed_flags & IFF_PROMISC) || promisc_changed) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       test_bit(__I40E_VSI_OVERFLOW_PROMISC,
					vsi->state));
		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
				 cur_promisc ? "on" : "off",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}
/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
		return;
	pf->flags &= ~I40E_FLAG_FILTER_SYNC;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				pf->flags |= I40E_FLAG_FILTER_SYNC;
				break;
			}
		}
	}
}
/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/
static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
{
	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		return I40E_RXBUFFER_2048;
	else
		return I40E_RXBUFFER_3072;
}
/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	if (i40e_enabled_xdp_vsi(vsi)) {
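		/* The XDP datapath does not chain Rx buffers, so the whole
		 * frame must fit in a single buffer; reject any MTU that
		 * would exceed the largest buffer the current Rx mode
		 * provides.
		 */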
		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
	}

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
		      I40E_FLAG_CLIENT_L2_CHANGE);
	return 0;
}
/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}
/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}
/**
 * i40e_vlan_rx_register - Setup or shutdown vlan offload
 * @netdev: network interface to be adjusted
 * @features: netdev features to test if VLAN offload is enabled or not
 **/
static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);
}
/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * This is a helper function for adding a new MAC/VLAN filter with the
 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add_f = i40e_add_filter(vsi, f->macaddr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	return 0;
}
/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
	int err;

	if (vsi->info.pvid)
		return -EINVAL;

	/* The network stack will attempt to add VID=0, with the intention to
	 * receive priority tagged packets with a VLAN of 0. Our HW receives
	 * these packets by default when configured to receive untagged
	 * packets, so we don't need to add a filter for this case.
	 * Additionally, HW interprets adding a VID=0 filter as meaning to
	 * receive *only* tagged traffic and stops receiving untagged traffic.
	 * Thus, we do not want to actually add a filter for VID=0
	 */
	if (!vid)
		return 0;

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	err = i40e_add_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (err)
		return err;

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * This function should be used to remove all VLAN filters which match the
 * given VID. It does not schedule the service event and does not take the
 * mac_filter_hash_lock so it may be combined with other operations under
 * a single invocation of the mac_filter_hash_lock.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->vlan == vid)
			__i40e_del_filter(vsi, f);
	}
}
/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
	if (!vid || vsi->info.pvid)
		return;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_rm_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}
/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	ret = i40e_vsi_add_vlan(vsi, vid);
	if (!ret)
		set_bit(vid, vsi->active_vlans);

	return ret;
}
/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}
/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
				     vid);
}
/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;
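	/* Taken together these PVLAN flags make the port VLAN transparent:
	 * the hardware inserts the PVID on transmit and strips the tag on
	 * receive (per the PVLAN flag definitions in the AQ interface).
	 */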
	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}
/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}
/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);

	return err;
}
/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				i40e_free_tx_resources(vsi->tx_rings[i]);
	}

	if (vsi->xdp_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
				i40e_free_tx_resources(vsi->xdp_rings[i]);
	}
}
/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);

	return err;
}
/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}
/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	int cpu;

	if (!ring->q_vector || !ring->netdev || ring->ch)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
		return;

	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
			    ring->queue_index);
}
/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
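	/* The HMC context stores the descriptor ring base address in
	 * 128-byte units, hence the division above.
	 */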
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));
));
3132 /* As part of VSI creation/update, FW allocates certain
3133 * Tx arbitration queue sets for each TC enabled for
3134 * the VSI. The FW returns the handles to these queue
3135 * sets as part of the response buffer to Add VSI,
3136 * Update VSI, etc. AQ commands. It is expected that
3137 * these queue set handles be associated with the Tx
3138 * queues by the driver as part of the TX queue context
3139 * initialization. This has to be done regardless of
3140 * DCB as by default everything is mapped to TC0.
3145 le16_to_cpu(ring
->ch
->info
.qs_handle
[ring
->dcb_tc
]);
3148 tx_ctx
.rdylist
= le16_to_cpu(vsi
->info
.qs_handle
[ring
->dcb_tc
]);
3150 tx_ctx
.rdylist_act
= 0;
3152 /* clear the context in the HMC */
3153 err
= i40e_clear_lan_tx_queue_context(hw
, pf_q
);
3155 dev_info(&vsi
->back
->pdev
->dev
,
3156 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
3157 ring
->queue_index
, pf_q
, err
);
3161 /* set the context in the HMC */
3162 err
= i40e_set_lan_tx_queue_context(hw
, pf_q
, &tx_ctx
);
3164 dev_info(&vsi
->back
->pdev
->dev
,
3165 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
3166 ring
->queue_index
, pf_q
, err
);
3170 /* Now associate this queue with this PCI function */
3172 if (ring
->ch
->type
== I40E_VSI_VMDQ2
)
3173 qtx_ctl
= I40E_QTX_CTL_VM_QUEUE
;
3177 qtx_ctl
|= (ring
->ch
->vsi_number
<<
3178 I40E_QTX_CTL_VFVM_INDX_SHIFT
) &
3179 I40E_QTX_CTL_VFVM_INDX_MASK
;
3181 if (vsi
->type
== I40E_VSI_VMDQ2
) {
3182 qtx_ctl
= I40E_QTX_CTL_VM_QUEUE
;
3183 qtx_ctl
|= ((vsi
->id
) << I40E_QTX_CTL_VFVM_INDX_SHIFT
) &
3184 I40E_QTX_CTL_VFVM_INDX_MASK
;
3186 qtx_ctl
= I40E_QTX_CTL_PF_QUEUE
;
3190 qtx_ctl
|= ((hw
->pf_id
<< I40E_QTX_CTL_PF_INDX_SHIFT
) &
3191 I40E_QTX_CTL_PF_INDX_MASK
);
3192 wr32(hw
, I40E_QTX_CTL(pf_q
), qtx_ctl
);
3195 /* cache tail off for easier writes later */
3196 ring
->tail
= hw
->hw_addr
+ I40E_QTX_TAIL(pf_q
);
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	ring->rx_buf_len = vsi->rx_buf_len;

	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
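	/* dbuff is expressed in units of BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT)
	 * bytes, so round the buffer length up to the next whole unit.
	 */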
	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* configure Rx buffer alignment */
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		clear_ring_build_skb_enabled(ring);
	else
		set_ring_build_skb_enabled(ring);

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));

	return 0;
}
/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);

	return err;
}
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
#if (PAGE_SIZE < 8192)
	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
		return;
	}

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}
/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);
}
/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* Reset FDir counters as we're replaying all existing filters */
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
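		/* Each vector's interrupt causes form a hardware linked
		 * list: every Rx queue points at its paired Tx queue (or
		 * at the XDP Tx queue when XDP is enabled), and every Tx
		 * queue points at the next pair's Rx queue, until the list
		 * is terminated below.
		 */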
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX <<
			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			if (has_xdp) {
				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
				      (I40E_QUEUE_TYPE_TX <<
				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
			}

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX <<
			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST <<
					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	if (i40e_enabled_xdp_vsi(vsi)) {
		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
		      (I40E_QUEUE_TYPE_TX
		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
	}

	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);

	i40e_flush(hw);
}
/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}
/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}
/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}
/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40e_irq_affinity_release(struct kref *ref) {}
/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;
	int cpu;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
		q_vector->affinity_notify.release = i40e_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread affinity hints out across online CPUs.
		 *
		 * get_cpu_mask returns a static constant mask with
		 * a permanent lifetime so it's ok to pass to
		 * irq_set_affinity_hint without making a copy.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &vsi->q_vectors[vector]);
	}
	return err;
}
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable interrupt causation from each queue */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u32 val;

		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);

		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;
		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
	}

	/* disable each interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf);
	}

	i40e_flush(&pf->hw);
	return 0;
}
/**
 * i40e_free_misc_vector - Free the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_free_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);

	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
	}
}
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
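	/* Causes handled below clear their bit in ena_mask so they stay
	 * masked until the service task has dealt with them; whatever is
	 * still set in ena_mask is re-armed on the way out.
	 */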
	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (icr0 & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_dbg(&pf->pdev->dev, "cleared PE_CRITERR\n");
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* We do not have a way to disarm Queue causes while leaving
		 * interrupt enabled for all other causes, ideally
		 * interrupt should be disabled while we are in NAPI but
		 * this is not a performance path and napi_schedule()
		 * can deal with rescheduling.
		 */
		if (!test_bit(__I40E_DOWN, pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);
		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
	}

	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state);
		}
	}

	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: tx ring to clean
 * @budget: how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;
	struct i40e_vsi *vsi;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

	vsi = q_vector->tx.ring->vsi;
	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);

	return IRQ_HANDLED;
}
/**
 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/
static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
	if (i40e_enabled_xdp_vsi(vsi)) {
		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];

		xdp_ring->q_vector = q_vector;
		xdp_ring->next = q_vector->tx.ring;
		q_vector->tx.ring = xdp_ring;
		q_vector->tx.count++;
	}

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}
/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code. Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;
		q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}
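/* Worked example (editorial, not part of the original source): with 10 queue
 * pairs spread over 4 vectors, DIV_ROUND_UP(qp_remaining, q_vectors - v_start)
 * assigns 3, 3, 2, 2 ringpairs to vectors 0..3:
 *
 *	v_start = 0: DIV_ROUND_UP(10, 4) = 3, qp_remaining -> 7
 *	v_start = 1: DIV_ROUND_UP(7, 3)  = 3, qp_remaining -> 4
 *	v_start = 2: DIV_ROUND_UP(4, 2)  = 2, qp_remaining -> 2
 *	v_start = 3: DIV_ROUND_UP(2, 1)  = 2, qp_remaining -> 0
 *
 * so the earliest vectors absorb the remainder and the load stays balanced.
 */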
/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts. It's not called while the normal interrupt routine is executing.
 **/
static void i40e_netpoll(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif
#define I40E_QTX_ENA_WAIT_COUNT 50

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 tx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tx_reg;
	int i;

	/* warn the TX unit of coming changes */
	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
	if (!enable)
		usleep_range(10, 20);

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable) {
		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}

	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
}
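/* Editorial note (not part of the original source): the QTX_ENA register pairs
 * a software-written request bit (QENA_REQ) with a hardware-updated status bit
 * (QENA_STAT). i40e_control_tx_q() first waits for any in-flight transition to
 * settle (REQ == STAT) before flipping REQ; the wait for STAT to follow the
 * new request is left to the caller, conceptually:
 *
 *	i40e_control_tx_q(pf, pf_q, enable);		// flip QENA_REQ
 *	ret = i40e_pf_txq_wait(pf, pf_q, enable);	// poll QENA_STAT
 */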
/**
 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: VSI SEID
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @is_xdp: true if the queue is used for XDP
 * @enable: start or stop the queue
 **/
static int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
				  bool is_xdp, bool enable)
{
	int ret;

	i40e_control_tx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	ret = i40e_pf_txq_wait(pf, pf_q, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d %sTx ring %d %sable timeout\n",
			 seid, (is_xdp ? "XDP " : ""), pf_q,
			 (enable ? "en" : "dis"));
	}

	return ret;
}
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q,
					     false /*is xdp*/, enable);
		if (ret)
			break;

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;

		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q + vsi->alloc_queue_pairs,
					     true /*is xdp*/, enable);
		if (ret)
			break;
	}

	return ret;
}
/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 rx_reg;
	int i;

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable)
		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	else
		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;

	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
}
/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_rx_q(pf, pf_q, enable);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* Due to HW errata, on Rx disable only, the register can indicate done
	 * before it really is. Needs 50ms to be sure
	 */
	if (!enable)
		mdelay(50);

	return ret;
}
/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/
int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	ret = i40e_vsi_control_rx(vsi, true);
	if (ret)
		return ret;
	ret = i40e_vsi_control_tx(vsi, true);

	return ret;
}
/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
	/* When port TX is suspended, don't wait */
	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
		return i40e_vsi_stop_rings_no_wait(vsi);

	/* do rx first for enable and last for disable
	 * Ignore return value, we need to shutdown whatever we can
	 */
	i40e_vsi_control_tx(vsi, false);
	i40e_vsi_control_rx(vsi, false);
}
/**
 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
 * @vsi: the VSI being shutdown
 *
 * This function stops all the rings for a VSI but does not delay to verify
 * that rings have been disabled. It is expected that the caller is shutting
 * down multiple VSIs at once and will delay together for all the VSIs after
 * initiating the shutdown. This is particularly useful for shutting down lots
 * of VFs together. Otherwise, a large delay can be incurred while configuring
 * each VSI in serial.
 **/
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_tx_q(pf, pf_q, false);
		i40e_control_rx_q(pf, pf_q, false);
	}
}
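/* Usage sketch (editorial, not part of the original source): a caller tearing
 * down many VSIs would stop them all without the per-queue waits and then pay
 * one shared settle delay, along these lines (the 50ms value mirrors the Rx
 * disable errata delay noted in i40e_vsi_control_rx() and is an assumption
 * here, not a documented requirement of this helper):
 *
 *	int v;
 *
 *	for (v = 0; v < pf->num_alloc_vsi; v++)
 *		if (pf->vsi[v])
 *			i40e_vsi_stop_rings_no_wait(pf->vsi[v]);
 *	mdelay(50);	// one combined settle delay for all VSIs
 */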
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* remove our suggested affinity mask for this IRQ */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx. To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);

				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	kfree_rcu(q_vector, rcu);
}
/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/
static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
	int v_idx;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		i40e_free_q_vector(vsi, v_idx);
}
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_free_misc_vector(pf);

	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}
/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		pf->flags |= I40E_FLAG_CLIENT_RESET;
}
/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}
/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}
/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}
/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}
/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}

		if (!i40e_enabled_xdp_vsi(vsi))
			goto wait_rx;

		/* Check and wait for the XDP Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
				       false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d XDP Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
wait_rx:
		/* Check and wait for the Rx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v]) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}

#endif
/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for ISCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}
/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	int i, tc_unused = 0;
	u8 num_tc = 0;
	u8 ret = 0;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and create a bitmask of enabled TCs
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);

	/* Now scan the bitmask to check for
	 * contiguous TCs starting with TC0
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TC - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = 1;
		}
	}

	/* There is always at least TC0 */
	if (!ret)
		ret = 1;

	return ret;
}
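/* Worked example (editorial, not part of the original source): with an ETS
 * priority table of {0, 0, 1, 1, 2, 0, 0, 0}, the first loop above builds
 * the bitmask 0b111 (TC0..TC2) and the second loop counts three contiguous
 * TCs, so i40e_dcb_get_num_tc() returns 3. A table mapping some priority to
 * TC3 but none to TC2 would yield the non-contiguous mask 0b1011, trip the
 * pr_err() path, and fall back to a single TC.
 */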
/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the number of
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}
/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Query the current MQPRIO configuration and return the number of
 * traffic classes enabled.
 **/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
	u8 enabled_tc = 1, i;

	for (i = 1; i < num_tc; i++)
		enabled_tc |= BIT(i);
	return enabled_tc;
}
/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;

	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}
/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return i40e_mqprio_get_enabled_tc(pf);

	/* If neither MQPRIO nor DCB is enabled for this PF then just return
	 * the default TC
	 */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
			le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
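/* Worked example (editorial, not part of the original source): firmware
 * returns tc_bw_max as two little-endian 16-bit words packing eight 4-bit
 * fields, one per TC, of which only the low 3 bits carry the max-quanta
 * value. If tc_bw_max[0] = 0x4321 and tc_bw_max[1] = 0x8765, the combined
 * u32 is 0x87654321 and the per-TC quanta unpack as:
 *
 *	TC0: (0x87654321 >> 0)  & 0x7 = 1
 *	TC1: (0x87654321 >> 4)  & 0x7 = 2
 *	...
 *	TC7: (0x87654321 >> 28) & 0x7 = 0	(0x8 has only bit 3 set, masked off)
 */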
/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO)
		return 0;
	if (!vsi->mqprio_qopt.qopt.hw) {
		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for vsi->seid %u\n",
				 vsi->seid);
		return ret;
	}
	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
				       NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 vsi->back->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return;

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
/**
 * i40e_vsi_update_queue_map - Update our copy of VSI info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};

		dev_info(&pf->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
						  &bw_config, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed querying vsi bw info, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			goto out;
		}
		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;

			if (!valid_tc)
				valid_tc = bw_config.tc_valid_bits;
			/* Always enable TC0, no matter what */
			valid_tc |= 1;
			dev_info(&pf->pdev->dev,
				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
			enabled_tc = valid_tc;
		}

		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"Unable to configure TC map %d for VSI %d\n",
				enabled_tc, vsi->seid);
			goto out;
		}
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
		if (ret)
			goto out;
	} else {
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
	}

	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
	 * queues changed.
	 */
	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
				      vsi->num_queue_pairs);
		ret = i40e_vsi_config_rss(vsi);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reconfig rss for num_queues\n");
			return ret;
		}
		vsi->reconfig_rss = false;
	}
	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping
	 * information
	 */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
/**
 * i40e_get_link_speed - Returns link speed for the interface
 * @vsi: VSI to be configured
 **/
int i40e_get_link_speed(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		return 40000;
	case I40E_LINK_SPEED_25GB:
		return 25000;
	case I40E_LINK_SPEED_20GB:
		return 20000;
	case I40E_LINK_SPEED_10GB:
		return 10000;
	case I40E_LINK_SPEED_1GB:
		return 1000;
	default:
		return -EINVAL;
	}
}
/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max TX rate to be configured as BW limit
 *
 * Helper function to set BW limit for a given VSI
 **/
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
{
	struct i40e_pf *pf = vsi->back;
	u64 credits = 0;
	int speed = 0;
	int ret = 0;

	speed = i40e_get_link_speed(vsi);
	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev,
			"Invalid max tx rate %llu specified for VSI seid %d.",
			max_tx_rate, seid);
		return -EINVAL;
	}
	if (max_tx_rate && max_tx_rate < 50) {
		dev_warn(&pf->pdev->dev,
			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	credits = max_tx_rate;
	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	return ret;
}
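/* Worked example (editorial, not part of the original source): credits are
 * expressed in 50 Mbps units (I40E_BW_CREDIT_DIVISOR), so a requested
 * max_tx_rate of 500 Mbps becomes 500 / 50 = 10 credits. The 50 Mbps floor
 * enforced above guarantees the division never rounds a non-zero rate down
 * to 0, which the hardware would interpret as "limit disabled".
 */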
/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove queue channels for the TCs
 **/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
	enum i40e_admin_queue_err last_aq_status;
	struct i40e_cloud_filter *cfilter;
	struct i40e_channel *ch, *ch_tmp;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	int ret, i;

	/* Reset rss size that was stored when reconfiguring rss for
	 * channel VSIs with non-power-of-2 queue count.
	 */
	vsi->current_rss_size = 0;

	/* perform cleanup for channels if they exist */
	if (list_empty(&vsi->ch_list))
		return;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct i40e_vsi *p_vsi;

		list_del(&ch->list);
		p_vsi = ch->parent_vsi;
		if (!p_vsi || !ch->initialized) {
			kfree(ch);
			continue;
		}

		/* Reset queue contexts */
		for (i = 0; i < ch->num_queue_pairs; i++) {
			struct i40e_ring *tx_ring, *rx_ring;
			u16 pf_q;

			pf_q = ch->base_queue + i;
			tx_ring = vsi->tx_rings[pf_q];
			tx_ring->ch = NULL;

			rx_ring = vsi->rx_rings[pf_q];
			rx_ring->ch = NULL;
		}

		/* Reset BW configured for this VSI via mqprio */
		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for ch->seid %u\n",
				 ch->seid);

		/* delete cloud filters associated with this channel */
		hlist_for_each_entry_safe(cfilter, node,
					  &pf->cloud_filter_list, cloud_node) {
			if (cfilter->seid != ch->seid)
				continue;

			hash_del(&cfilter->cloud_node);
			if (cfilter->dst_port)
				ret = i40e_add_del_cloud_filter_big_buf(vsi,
									cfilter,
									false);
			else
				ret = i40e_add_del_cloud_filter(vsi, cfilter,
								false);
			last_aq_status = pf->hw.aq.asq_last_status;
			if (ret)
				dev_info(&pf->pdev->dev,
					 "Failed to delete cloud filter, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw, last_aq_status));
			kfree(cfilter);
		}

		/* delete VSI from FW */
		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
					     NULL);
		if (ret)
			dev_err(&vsi->back->pdev->dev,
				"unable to remove channel (%d) for parent VSI(%d)\n",
				ch->seid, p_vsi->seid);
		kfree(ch);
	}
	INIT_LIST_HEAD(&vsi->ch_list);
}
/**
 * i40e_is_any_channel - detect if any channel exists
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Returns true if at least one initialized channel exists for the
 * associated VSI, false otherwise
 **/
static bool i40e_is_any_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (ch->initialized)
			return true;
	}

	return false;
}
/**
 * i40e_get_max_queues_for_channel
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Helper function which returns max value among the queue counts set on the
 * channels/TCs created.
 **/
static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	int max = 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			continue;
		if (ch->num_queue_pairs > max)
			max = ch->num_queue_pairs;
	}

	return max;
}
/**
 * i40e_validate_num_queues - validate num_queues w.r.t channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates should the RSS be reconfigured or not
 *
 * This function validates number of queues in the context of new channel
 * which is being established and determines if RSS should be reconfigured
 * or not for parent VSI.
 **/
static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
				    struct i40e_vsi *vsi, bool *reconfig_rss)
{
	int max_ch_queues;

	if (!reconfig_rss)
		return -EINVAL;

	*reconfig_rss = false;
	if (vsi->current_rss_size) {
		if (num_queues > vsi->current_rss_size) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) > vsi's current_size(%d)\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		} else if ((num_queues < vsi->current_rss_size) &&
			   (!is_power_of_2(num_queues))) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		}
	}

	if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured for channels, if any
		 * channel exists, then enforce 'num_queues' to be more than
		 * the max queues ever configured for a channel.
		 */
		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
		if (num_queues < max_ch_queues) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
				num_queues, max_ch_queues);
			return -EINVAL;
		}
		*reconfig_rss = true;
	}

	return 0;
}
/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 *
 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
 **/
static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	int local_rss_size;
	u8 *lut;
	int ret;

	if (!vsi->rss_size)
		return -EINVAL;

	if (rss_size > vsi->rss_size)
		return -EINVAL;

	local_rss_size = min_t(int, vsi->rss_size, rss_size);
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Ignoring user configured lut if there is one */
	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		kfree(lut);
		return ret;
	}
	kfree(lut);

	/* Do the update w.r.t. storing rss_size */
	if (!vsi->orig_rss_size)
		vsi->orig_rss_size = vsi->rss_size;
	vsi->current_rss_size = local_rss_size;

	return ret;
}
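/* Editorial sketch (not part of the original source): i40e_fill_rss_lut() is
 * defined elsewhere in this file; the effect relied on here is a simple
 * round-robin spread of the first 'local_rss_size' queues across the lookup
 * table, conceptually equivalent to:
 *
 *	for (i = 0; i < vsi->rss_table_size; i++)
 *		lut[i] = i % local_rss_size;
 *
 * which is why shrinking rss_size only requires reprogramming the LUT and
 * not touching the hash key.
 */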
/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
 * Setup queue map for a specific channel
 **/
static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
					 struct i40e_vsi_context *ctxt,
					 struct i40e_channel *ch)
{
	u16 qcount, qmap, sections = 0;
	u8 offset = 0;
	int pow;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
	ch->num_queue_pairs = qcount;

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(qcount);
	if (!is_power_of_2(qcount))
		pow++;

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
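/* Worked example (editorial, not part of the original source): for a channel
 * with qcount = 6 queue pairs, ilog2(6) = 2 and 6 is not a power of two, so
 * pow is bumped to 3 (i.e. the next power of two, 8). With offset = 0 the
 * qmap then encodes "offset 0, 2^3 queues" into TC0's mapping, while the
 * contiguous queue_mapping[0] entry pins the block to ch->base_queue.
 */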
/**
 * i40e_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 **/
static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
			    struct i40e_channel *ch)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int ret;

	if (ch->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = uplink_seid;
	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	if (ch->type == I40E_VSI_VMDQ2)
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
		ctxt.info.valid_sections |=
			cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
			cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
	}

	/* Set queue map for a given VSI context */
	i40e_channel_setup_queue_map(pf, &ctxt, ch);

	/* Now time to create VSI */
	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw,
				     pf->hw.aq.asq_last_status));
		return -ENOENT;
	}

	/* Success, update channel */
	ch->enabled_tc = enabled_tc;
	ch->seid = ctxt.seid;
	ch->vsi_number = ctxt.vsi_number;
	ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);

	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	ch->info.mapping_flags = ctxt.info.mapping_flags;
	memcpy(&ch->info.queue_mapping,
	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
	       sizeof(ctxt.info.tc_mapping));

	return 0;
}
static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
				  u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = ch->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
				       &bw_data, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
			 vsi->back->hw.aq.asq_last_status, ch->seid);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		ch->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure TX rings associated with channel (VSI) since queues are being
 * taken from the parent VSI.
 **/
static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
				       struct i40e_vsi *vsi,
				       struct i40e_channel *ch)
{
	i40e_status ret;
	int i;
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (ch->enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	/* configure BW for new VSI */
	ret = i40e_channel_config_bw(vsi, ch, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for channel (seid %u)\n",
			 ch->enabled_tc, ch->seid);
		return ret;
	}

	for (i = 0; i < ch->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u16 pf_q;

		pf_q = ch->base_queue + i;

		/* Get to TX ring ptr of main VSI, for re-setup TX queue
		 * context
		 */
		tx_ring = vsi->tx_rings[pf_q];
		tx_ring->ch = ch;

		/* Get the RX ring ptr */
		rx_ring = vsi->rx_rings[pf_q];
		rx_ring->ch = ch;
	}

	return 0;
}
/**
 * i40e_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures TX rings accordingly
 **/
static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
					struct i40e_vsi *vsi,
					struct i40e_channel *ch,
					u16 uplink_seid, u8 type)
{
	int ret;

	ch->initialized = false;
	ch->base_queue = vsi->next_base_queue;
	ch->type = type;

	/* Proceed with creation of channel (VMDq2) VSI */
	ret = i40e_add_channel(pf, uplink_seid, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to add_channel using uplink_seid %u\n",
			 uplink_seid);
		return ret;
	}

	/* Mark the successful creation of channel */
	ch->initialized = true;

	/* Reconfigure TX queues using QTX_CTL register */
	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to configure TX rings for channel %u\n",
			 ch->seid);
		return ret;
	}

	/* update 'next_base_queue' */
	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
	dev_dbg(&pf->pdev->dev,
		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
		ch->seid, ch->vsi_number, ch->stat_counter_idx,
		ch->num_queue_pairs,
		vsi->next_base_queue);
	return ret;
}
/**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the parent VSI (only the main VSI is supported)
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) as a VMDq2 type and uplink it to the
 * switching element of the main LAN VSI (uplink_seid)
 **/
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
			       struct i40e_channel *ch)
{
	u8 vsi_type;
	u16 seid;
	int ret;

	if (vsi->type == I40E_VSI_MAIN) {
		vsi_type = I40E_VSI_VMDQ2;
	} else {
		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
			vsi->type);
		return false;
	}

	/* underlying switching element */
	seid = pf->vsi[pf->lan_vsi]->uplink_seid;

	/* create channel (VSI), configure TX rings */
	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
	if (ret) {
		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
		return false;
	}

	return ch->initialized ? true : false;
}
/**
 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
 * @vsi: ptr to VSI which has PF backing
 *
 * Sets up the switch mode if it needs to be changed, restricting the
 * configuration to the modes that are allowed.
 **/
static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
{
	u8 mode;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
	if (ret)
		return -EINVAL;

	if (hw->dev_caps.switch_mode) {
		/* if switch mode is set, support mode2 (non-tunneled for
		 * cloud filter) for now
		 */
		u32 switch_mode = hw->dev_caps.switch_mode &
				  I40E_SWITCH_MODE_MASK;
		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
				return 0;
			dev_err(&pf->pdev->dev,
				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
				hw->dev_caps.switch_mode);
			return -EINVAL;
		}
	}

	/* Set Bit 7 to be valid */
	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;

	/* Set L4type for TCP support */
	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;

	/* Set cloud filter mode */
	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;

	/* Prep mode field for set_switch_config */
	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
					pf->last_sw_conf_valid_flags,
					mode, NULL);
	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
		dev_err(&pf->pdev->dev,
			"couldn't set switch config bits, err %s aq_err %s\n",
			i40e_stat_str(hw, ret),
			i40e_aq_str(hw,
				    hw->aq.asq_last_status));

	return ret;
}
/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates a channel (VSI) using the num_queues specified by
 * the user and reconfigures RSS if needed.
 **/
int i40e_create_queue_channel(struct i40e_vsi *vsi,
			      struct i40e_channel *ch)
{
	struct i40e_pf *pf = vsi->back;
	bool reconfig_rss;
	int err;

	if (!ch)
		return -EINVAL;

	if (!ch->num_queue_pairs) {
		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
			ch->num_queue_pairs);
		return -EINVAL;
	}

	/* validate user requested num_queues for channel */
	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
				       &reconfig_rss);
	if (err) {
		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
			 ch->num_queue_pairs);
		return -EINVAL;
	}

	/* By default we are in VEPA mode, if this is the first VF/VMDq
	 * VSI to be added switch to VEB mode.
	 */
	if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
	    (!i40e_is_any_channel(vsi))) {
		if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
			dev_dbg(&pf->pdev->dev,
				"Failed to create channel. Override queues (%u) not power of 2\n",
				vsi->tc_config.tc_info[0].qcount);
			return -EINVAL;
		}

		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;

			if (vsi->type == I40E_VSI_MAIN) {
				if (pf->flags & I40E_FLAG_TC_MQPRIO)
					i40e_do_reset(pf, I40E_PF_RESET_FLAG,
						      true);
				else
					i40e_do_reset_safe(pf,
							   I40E_PF_RESET_FLAG);
			}
		}
		/* now onwards for main VSI, number of queues will be value
		 * of TC0's queue count
		 */
	}

	/* By this time, vsi->cnt_q_avail shall be set to non-zero and
	 * it should be more than num_queues
	 */
	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
		dev_dbg(&pf->pdev->dev,
			"Error: cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_queue_pairs);
		return -EINVAL;
	}

	/* reconfig_rss only if vsi type is MAIN_VSI */
	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "Error: unable to reconfig rss for num_queues (%u)\n",
				 ch->num_queue_pairs);
			return -EINVAL;
		}
	}

	if (!i40e_setup_channel(pf, vsi, ch)) {
		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
		return -EINVAL;
	}

	dev_info(&pf->pdev->dev,
		 "Setup channel (id:%u) utilizing num_queues %d\n",
		 ch->seid, ch->num_queue_pairs);

	/* configure VSI for BW limit */
	if (ch->max_tx_rate) {
		u64 credits = ch->max_tx_rate;

		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
			return -EINVAL;

		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&pf->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			ch->max_tx_rate,
			credits,
			ch->seid);
	}

	/* in case of VF, this will be main SRIOV VSI */
	ch->parent_vsi = vsi;

	/* and update main_vsi's count for queue_available to use */
	vsi->cnt_q_avail -= ch->num_queue_pairs;

	return 0;
}
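
/* A note on the BW-limit math above: user-supplied rates are handled in
 * Mbps, while the firmware scheduler counts credits of 50 Mbps each
 * (I40E_BW_CREDIT_DIVISOR). A minimal illustrative sketch of the
 * conversion (the helper name is hypothetical, not part of this driver):
 */
#if 0	/* example only, not compiled */
static inline u64 i40e_example_mbps_to_credits(u64 rate_mbps)
{
	u64 credits = rate_mbps;

	/* do_div() leaves the quotient in place: 500 Mbps -> 10 credits */
	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	return credits;
}
#endif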
/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 **/
static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	u64 max_rate = 0;
	int ret = 0, i;

	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
	vsi->tc_seid_map[0] = vsi->seid;
	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
			if (!ch) {
				ret = -ENOMEM;
				goto err_free;
			}

			INIT_LIST_HEAD(&ch->list);
			ch->num_queue_pairs =
				vsi->tc_config.tc_info[i].qcount;
			ch->base_queue =
				vsi->tc_config.tc_info[i].qoffset;

			/* Bandwidth limit through tc interface is in bytes/s,
			 * change to Mbit/s
			 */
			max_rate = vsi->mqprio_qopt.max_rate[i];
			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
			ch->max_tx_rate = max_rate;

			list_add_tail(&ch->list, &vsi->ch_list);

			ret = i40e_create_queue_channel(vsi, ch);
			if (ret) {
				dev_err(&vsi->back->pdev->dev,
					"Failed creating queue channel with TC%d: queues %d\n",
					i, ch->num_queue_pairs);
				goto err_free;
			}
			vsi->tc_seid_map[i] = ch->seid;
		}
	}
	return ret;

err_free:
	i40e_remove_queue_channels(vsi);
	return ret;
}
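
/* For context: the channels built above back tc's mqprio "channel" mode
 * offload. An illustrative user-space invocation that exercises this path
 * (interface name hypothetical):
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 2 map 0 0 0 0 1 1 1 1 \
 *      queues 4@0 4@4 hw 1 mode channel
 */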
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would have quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
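
/* Note: i40e_veb_config_tc() gives every enabled TC the same
 * tc_bw_share_credits value (1), i.e. identical relative weights and
 * therefore equal ETS bandwidth shares; absolute_credits is deliberately
 * left unset so the firmware interprets the values in relative mode.
 */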
/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
	 * Also do not enable DCBx if FW LLDP agent is disabled
	 */
	if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
	    (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");
		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14
#define FC_SIZE 8
/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 **/
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	enum i40e_aq_link_speed new_speed;
	struct i40e_pf *pf = vsi->back;
	char *speed = "Unknown";
	char *fc = "Unknown";
	char *fec = "";
	char *req_fec = "";
	char *an = "";

	new_speed = pf->hw.phy.link_info.link_speed;

	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
		return;
	vsi->current_isup = isup;
	vsi->current_speed = new_speed;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (pf->hw.func_caps.npar_enable &&
	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (pf->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
		req_fec = ", Requested FEC: None";
		fec = ", FEC: None";
		an = ", Autoneg: False";

		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = ", Autoneg: True";

		if (pf->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = ", FEC: CL74 FC-FEC/BASE-R";
		else if (pf->hw.phy.link_info.fec_info &
			 I40E_AQ_CONFIG_FEC_RS_ENA)
			fec = ", FEC: CL108 RS-FEC";

		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
		 * both RS and FC are requested
		 */
		if (vsi->back->hw.phy.link_info.req_fec_info &
		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
			if (vsi->back->hw.phy.link_info.req_fec_info &
			    I40E_AQ_REQUEST_FEC_RS)
				req_fec = ", Requested FEC: CL108 RS-FEC";
			else
				req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
		}
	}

	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
		    speed, req_fec, fec, an, fc);
}
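
/* Example of the resulting log line for a 25G link (values illustrative):
 *   NIC Link is Up, 25 Gbps Full Duplex, Requested FEC: None, FEC: None,
 *   Autoneg: True, Flow Control: None
 * The FEC and autoneg fragments are only populated for 25G links above;
 * other speeds print only the speed and flow-control fields.
 */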
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_start_rings(vsi);
	if (err)
		return err;

	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
	i40e_service_event_schedule(pf);

	return 0;
}
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_stop_rings(vsi);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}
/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
 **/
static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
				     struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0;
	u64 max_rate = 0;
	int i;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
		return -EINVAL;
	for (i = 0; ; i++) {
		if (!mqprio_qopt->qopt.count[i])
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&vsi->back->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		max_rate = mqprio_qopt->max_rate[i];
		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
		sum_max_rate += max_rate;

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
			return -EINVAL;
	}
	if (vsi->num_queue_pairs <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
		return -EINVAL;
	}
	if (sum_max_rate > i40e_get_link_speed(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Invalid max tx rate specified\n");
		return -EINVAL;
	}
	return 0;
}
/**
 * i40e_vsi_set_default_tc_config - set default values for tc configuration
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
{
	u16 qcount;
	int i;

	/* Only TC0 is enabled */
	vsi->tc_config.numtc = 1;
	vsi->tc_config.enabled_tc = 1;
	qcount = min_t(int, vsi->alloc_queue_pairs,
		       i40e_pf_get_max_q_per_tc(vsi->back));
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* For the TC that is not enabled set the offset to default
		 * queue and allocate one queue for the given TC.
		 */
		vsi->tc_config.tc_info[i].qoffset = 0;
		if (i == 0)
			vsi->tc_config.tc_info[i].qcount = qcount;
		else
			vsi->tc_config.tc_info[i].qcount = 1;
		vsi->tc_config.tc_info[i].netdev_tc = 0;
	}
}
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: tc offload data
 **/
static int i40e_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0, num_tc, hw;
	bool need_reset = false;
	int ret = -EINVAL;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tc;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev,
			    "Configuring TC not supported in MFP mode\n");
		return ret;
	}
	switch (mode) {
	case TC_MQPRIO_MODE_DCB:
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;

		/* Check if DCB enabled to continue */
		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
			netdev_info(netdev,
				    "DCB is not enabled for adapter\n");
			return ret;
		}

		/* Check whether tc count is within enabled limit */
		if (num_tc > i40e_pf_get_num_tc(pf)) {
			netdev_info(netdev,
				    "TC count greater than enabled on link for adapter\n");
			return ret;
		}
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
			netdev_info(netdev,
				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
			return ret;
		}
		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
			return ret;
		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret)
			return ret;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
		       sizeof(*mqprio_qopt));
		pf->flags |= I40E_FLAG_TC_MQPRIO;
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
		break;
	default:
		return ret;
	}

config_tc:
	/* Generate TC map for number of tc requested */
	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
		i40e_remove_queue_channels(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		need_reset = true;
		goto exit;
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
		if (vsi->mqprio_qopt.max_rate[0]) {
			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];

			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
			if (!ret) {
				u64 credits = max_tx_rate;

				do_div(credits, I40E_BW_CREDIT_DIVISOR);
				dev_dbg(&vsi->back->pdev->dev,
					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
					max_tx_rate,
					credits,
					vsi->seid);
			} else {
				need_reset = true;
				goto exit;
			}
		}
		ret = i40e_configure_queue_channels(vsi);
		if (ret) {
			netdev_info(netdev,
				    "Failed configuring queue channels\n");
			need_reset = true;
			goto exit;
		}
	}

exit:
	/* Reset the configuration data to defaults, only TC0 is enabled */
	if (need_reset) {
		i40e_vsi_set_default_tc_config(vsi);
		need_reset = false;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);
	return ret;
}
/**
 * i40e_set_cld_element - sets cloud filter element data
 * @filter: cloud filter rule
 * @cld: ptr to cloud filter element data
 *
 * This is helper function to copy data into cloud filter element
 **/
static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
		     struct i40e_aqc_cloud_filters_element_data *cld)
{
	int i, j;
	u32 ipa;

	memset(cld, 0, sizeof(*cld));
	ether_addr_copy(cld->outer_mac, filter->dst_mac);
	ether_addr_copy(cld->inner_mac, filter->src_mac);

	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
		return;

	if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
		for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
		     i++, j += 2) {
			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
			ipa = cpu_to_le32(ipa);
			memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
		}
	} else {
		ipa = be32_to_cpu(filter->dst_ipv4);
		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
	}

	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));

	/* tenant_id is not supported by FW now, once the support is enabled
	 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
	 */
	if (filter->tenant_id)
		return;
}
/**
 * i40e_add_del_cloud_filter - Add/del cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added.
 **/
static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
				     struct i40e_cloud_filter *filter, bool add)
{
	struct i40e_aqc_cloud_filters_element_data cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;
	static const u16 flag_table[128] = {
		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_IIP] =
			I40E_AQC_ADD_CLOUD_FILTER_IIP,
	};

	if (filter->flags >= ARRAY_SIZE(flag_table))
		return I40E_ERR_CONFIG;

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter);

	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
					     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);

	if (filter->n_proto == ETH_P_IPV6)
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
	else
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);

	if (add)
		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	else
		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
			add ? "add" : "delete", filter->dst_port, ret,
			pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d\n",
			 add ? "Added" : "Deleted", filter->seid);
	return ret;
}
/**
 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec using big buffer.
 * Returns 0 if the filter was successfully added.
 **/
static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
					     struct i40e_cloud_filter *filter,
					     bool add)
{
	struct i40e_aqc_cloud_filters_element_bb cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;

	/* Both (src/dst) valid mac_addr are not supported */
	if ((is_valid_ether_addr(filter->dst_mac) &&
	     is_valid_ether_addr(filter->src_mac)) ||
	    (is_multicast_ether_addr(filter->dst_mac) &&
	     is_multicast_ether_addr(filter->src_mac)))
		return -EOPNOTSUPP;

	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
	 * ports are not supported via big buffer now.
	 */
	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
		return -EOPNOTSUPP;

	/* adding filter using src_port/src_ip is not supported at this stage */
	if (filter->src_port || filter->src_ipv4 ||
	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
		return -EOPNOTSUPP;

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter.element);

	if (is_valid_ether_addr(filter->dst_mac) ||
	    is_valid_ether_addr(filter->src_mac) ||
	    is_multicast_ether_addr(filter->dst_mac) ||
	    is_multicast_ether_addr(filter->src_mac)) {
		/* MAC + IP : unsupported mode */
		if (filter->dst_ipv4)
			return -EOPNOTSUPP;

		/* since we validated that L4 port must be valid before
		 * we get here, start with respective "flags" value
		 * and update if vlan is present or not
		 */
		cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);

		if (filter->vlan_id) {
			cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
		}

	} else if (filter->dst_ipv4 ||
		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
		cld_filter.element.flags =
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
		if (filter->n_proto == ETH_P_IPV6)
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
		else
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
	} else {
		dev_err(&pf->pdev->dev,
			"either mac or ip has to be valid for cloud filter\n");
		return -EINVAL;
	}

	/* Now copy L4 port in Byte 6..7 in general fields */
	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
						be16_to_cpu(filter->dst_port);

	if (add) {
		/* Validate current device switch mode, change if necessary */
		ret = i40e_validate_and_set_switch_mode(vsi);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"failed to set switch mode, ret %d\n",
				ret);
			return ret;
		}

		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	} else {
		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	}

	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d, L4 port: %d\n",
			 add ? "add" : "delete", filter->seid,
			 ntohs(filter->dst_port));
	return ret;
}
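
/* As i40e_configure_clsflower() below shows, callers take this big-buffer
 * path only when the filter specifies a destination L4 port; all other
 * cloud filters go through i40e_add_del_cloud_filter() above.
 */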
/**
 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
				 struct tc_cls_flower_offload *f,
				 struct i40e_cloud_filter *filter)
{
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct i40e_pf *pf = vsi->back;
	u8 field_flags = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
			f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);

		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);

		if (mask->keyid != 0)
			field_flags |= I40E_CLOUD_FIELD_TEN_ID;

		filter->tenant_id = be32_to_cpu(key->keyid);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);

		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);

		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		filter->n_proto = n_proto_key & n_proto_mask;
		filter->ip_proto = key->ip_proto;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);

		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(mask->dst)) {
			if (is_broadcast_ether_addr(mask->dst)) {
				field_flags |= I40E_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
					mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (is_broadcast_ether_addr(mask->src)) {
				field_flags |= I40E_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
					mask->src);
				return I40E_ERR_CONFIG;
			}
		}
		ether_addr_copy(filter->dst_mac, key->dst);
		ether_addr_copy(filter->src_mac, key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mask->vlan_id) {
			if (mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= I40E_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
					mask->vlan_id);
				return I40E_ERR_CONFIG;
			}
		}

		filter->vlan_id = cpu_to_be16(key->vlan_id);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		addr_type = key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		if (mask->dst) {
			if (mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				mask->dst = be32_to_cpu(mask->dst);
				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4\n",
					&mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->src) {
			if (mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				mask->src = be32_to_cpu(mask->src);
				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4\n",
					&mask->src);
				return I40E_ERR_CONFIG;
			}
		}

		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
			return I40E_ERR_CONFIG;
		}
		filter->dst_ipv4 = key->dst;
		filter->src_ipv4 = key->src;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		/* src and dest IPV6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&key->dst) ||
		    ipv6_addr_loopback(&key->src)) {
			dev_err(&pf->pdev->dev,
				"Bad ipv6, addr is LOOPBACK\n");
			return I40E_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
			field_flags |= I40E_CLOUD_FIELD_IIP;

		memcpy(&filter->src_ipv6, &key->src.s6_addr32,
		       sizeof(filter->src_ipv6));
		memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
		       sizeof(filter->dst_ipv6));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);

		if (mask->src) {
			if (mask->src == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
					be16_to_cpu(mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->dst) {
			if (mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
					be16_to_cpu(mask->dst));
				return I40E_ERR_CONFIG;
			}
		}

		filter->dst_port = key->dst;
		filter->src_port = key->src;

		switch (filter->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			dev_err(&pf->pdev->dev,
				"Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}
	filter->flags = field_flags;
	return 0;
}
/**
 * i40e_handle_tclass: Forward to a traffic class on the device
 * @vsi: Pointer to VSI
 * @tc: traffic class index on the device
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
			      struct i40e_cloud_filter *filter)
{
	struct i40e_channel *ch, *ch_tmp;

	/* direct to a traffic class on the same device */
	if (tc == 0) {
		filter->seid = vsi->seid;
		return 0;
	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
		if (!filter->dst_port) {
			dev_err(&vsi->back->pdev->dev,
				"Specify destination port to direct to traffic class that is not default\n");
			return -EINVAL;
		}
		if (list_empty(&vsi->ch_list))
			return -EINVAL;
		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
					 list) {
			if (ch->seid == vsi->tc_seid_map[tc])
				filter->seid = ch->seid;
		}
		return 0;
	}
	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
	return -EINVAL;
}
/**
 * i40e_configure_clsflower - Configure tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
				    struct tc_cls_flower_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	if (tc < 0) {
		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
		return -EOPNOTSUPP;
	}

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
		return -EBUSY;

	if (pf->fdir_pf_active_filters ||
	    (!hlist_empty(&pf->fdir_filter_list))) {
		dev_err(&vsi->back->pdev->dev,
			"Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
		return -EINVAL;
	}

	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
		dev_err(&vsi->back->pdev->dev,
			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->cookie = cls_flower->cookie;

	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
	if (err < 0)
		goto err;

	err = i40e_handle_tclass(vsi, tc, filter);
	if (err < 0)
		goto err;

	/* Add cloud filter */
	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, true);

	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to add cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		goto err;
	}

	/* add filter to the ordered list */
	INIT_HLIST_NODE(&filter->cloud_node);

	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);

	pf->num_cloud_filters++;

	return err;
err:
	kfree(filter);
	return err;
}
/**
 * i40e_find_cloud_filter - Find the cloud filter in the list
 * @vsi: Pointer to VSI
 * @cookie: filter specific cookie
 *
 **/
static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
							unsigned long *cookie)
{
	struct i40e_cloud_filter *filter = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &vsi->back->cloud_filter_list, cloud_node)
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	return NULL;
}
/**
 * i40e_delete_clsflower - Remove tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
				 struct tc_cls_flower_offload *cls_flower)
{
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);

	if (!filter)
		return -EINVAL;

	hash_del(&filter->cloud_node);

	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, false);

	kfree(filter);
	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to delete cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
	}

	pf->num_cloud_filters--;
	if (!pf->num_cloud_filters)
		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	return 0;
}
/**
 * i40e_setup_tc_cls_flower - flower classifier offloads
 * @netdev: net device to configure
 * @type_data: offload data
 **/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
				    struct tc_cls_flower_offload *cls_flower)
{
	struct i40e_vsi *vsi = np->vsi;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return i40e_configure_clsflower(vsi, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return i40e_delete_clsflower(vsi, cls_flower);
	case TC_CLSFLOWER_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct i40e_netdev_priv *np = cb_priv;

	if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return i40e_setup_tc_cls_flower(np, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

static int i40e_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
					     np, np);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			   void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return i40e_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return i40e_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	udp_tunnel_get_rx_info(netdev);

	return 0;
}
/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 *
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);
	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	return err;
}
/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct i40e_flex_pit *pit_entry, *tmp;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}

	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l3_flex_pit_list);

	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	pf->fdir_pf_active_filters = 0;
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	/* Reprogram the default input set for TCP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for UDP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for SCTP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for Other/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);

	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
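
/* The i40e_write_fd_input_set() calls above restore the default input
 * sets (source/destination L3 fields, plus L4 ports where applicable)
 * that user-defined flexible filters may have narrowed while they
 * existed; with all filters gone, the defaults apply again.
 */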
/**
 * i40e_cloud_filter_exit - Cleans up the cloud filters
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the cloud filters
 * were saved.
 **/
static void i40e_cloud_filter_exit(struct i40e_pf *pf)
{
	struct i40e_cloud_filter *cfilter;
	struct hlist_node *node;

	hlist_for_each_entry_safe(cfilter, node,
				  &pf->cloud_filter_list, cloud_node) {
		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
	}
	pf->num_cloud_filters = 0;

	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
	}
}
/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & I40E_PF_RESET_FLAG) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf, lock_acquired);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
					       vsi->state))
				i40e_vsi_reinit_locked(pf->vsi[v]);
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
					       vsi->state)) {
				set_bit(__I40E_VSI_DOWN, vsi->state);
				i40e_down(vsi);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
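
/* In-driver usage example, as seen elsewhere in this file: subtasks that
 * cannot reset synchronously request a PF reset and let the service task
 * perform it:
 *
 *   set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 *   i40e_service_event_schedule(pf);
 */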
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		pf->flags |= (I40E_FLAG_SERVICE_CLIENT_REQUESTED |
			      I40E_FLAG_CLIENT_L2_CHANGE);
	}

exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VFs
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			 >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}
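
/* The FD statistics registers read above pack two counters into one word:
 * the guaranteed-filter count in the low bits and the best-effort count
 * above it, hence the mask-and-shift pairs in the helpers.
 */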
/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
		if (pf->flags & I40E_FLAG_FD_SB_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_SB_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
		}
	}

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules active.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0)) {
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
		}
	}

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}
}
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags |= I40E_FLAG_FD_ATR_AUTO_DISABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}
/**
 * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}

/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first filter
 * miss error on Rx queue 0. Accumulating enough error messages before
 * reacting will make sure we don't cause a flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}
/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}
/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* save off old link status information */
	pf->hw.phy.link_info_old = pf->hw.phy.link_info;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	status = i40e_get_link_status(&pf->hw, &new_link);

	/* On success, disable temp link polling */
	if (status == I40E_SUCCESS) {
		if (pf->flags & I40E_FLAG_TEMP_LINK_POLLING)
			pf->flags &= ~I40E_FLAG_TEMP_LINK_POLLING;
	} else {
		/* Enable link polling temporarily until i40e_get_link_status
		 * returns I40E_SUCCESS
		 */
		pf->flags |= I40E_FLAG_TEMP_LINK_POLLING;
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
	    (pf->flags & I40E_FLAG_TEMP_LINK_POLLING))
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf);
	i40e_ptp_tx_hang(pf);
}
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf, false);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}
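/* The subtask above translates sticky request bits in pf->state into a
 * one-shot reset_flags word for i40e_do_reset(). For example, a request
 * latched elsewhere with
 *
 *	set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
 *	i40e_service_event_schedule(pf);
 *
 * is consumed here as BIT(__I40E_PF_RESET_REQUESTED) and cleared, so each
 * request triggers at most one reset per service-task pass.
 */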
/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
	 */
	i40e_link_event(pf);

	/* Check if module meets thermal requirements */
	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
		dev_err(&pf->pdev->dev,
			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
		dev_err(&pf->pdev->dev,
			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
	} else {
		/* check for unqualified module, if link is down, suppress
		 * the message if link was forced to be down.
		 */
		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
			dev_err(&pf->pdev->dev,
				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
			dev_err(&pf->pdev->dev,
				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		}
	}
}
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (i++ < pf->adminq_work_limit);

	if (i < pf->adminq_work_limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	kfree(event.msg_buf);
}
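/* The receive loop above is intentionally bounded: it drains at most
 * pf->adminq_work_limit ARQ elements per invocation. If the limit is hit,
 * __I40E_ADMINQ_EVENT_PENDING is left set so the service task reschedules
 * itself (see i40e_service_task) instead of stalling in this context.
 */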
/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}
/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}
/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}
/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}
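/* Design note: VEPA mode hairpins VM-to-VM traffic through the external
 * switch, so PF switch loopback must be off; VEB mode switches locally and
 * needs loopback on. That is the entire decision made by
 * i40e_config_bridge_mode() above.
 */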
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
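/* A sketch of the rebuild order enforced above: the owner VSI is re-added
 * first (temporarily on the base uplink), the VEB is re-created around it,
 * the remaining VSIs are re-attached under the VEB's new seid, and only
 * then does the function recurse into child VEBs - their uplink seids are
 * not known until the parent exists in the HW switch again.
 */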
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 * @list_type: AQ capability to be queried
 **/
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
						    &data_size, list_type,
						    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
		if (list_type == i40e_aqc_opc_list_func_capabilities) {
			dev_info(&pf->pdev->dev,
				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
				 pf->hw.func_caps.num_msix_vectors,
				 pf->hw.func_caps.num_msix_vectors_vf,
				 pf->hw.func_caps.fd_filters_guaranteed,
				 pf->hw.func_caps.fd_filters_best_effort,
				 pf->hw.func_caps.num_tx_qp,
				 pf->hw.func_caps.num_vsis);
		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
			dev_info(&pf->pdev->dev,
				 "switch_mode=0x%04x, function_valid=0x%08x\n",
				 pf->hw.dev_caps.switch_mode,
				 pf->hw.dev_caps.valid_functions);
			dev_info(&pf->pdev->dev,
				 "SR-IOV=%d, num_vfs for all function=%u\n",
				 pf->hw.dev_caps.sr_iov_1_1,
				 pf->hw.dev_caps.num_vfs);
			dev_info(&pf->pdev->dev,
				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
				 pf->hw.dev_caps.num_vsis,
				 pf->hw.dev_caps.num_rx_qp,
				 pf->hw.dev_caps.num_tx_qp);
		}
	}
	if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
		if (pf->hw.revision_id == 0 &&
		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
			dev_info(&pf->pdev->dev,
				 "got num_vsis %d, setting num_vsis to %d\n",
				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
		}
	}
	return 0;
}
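/* The discovery call above uses a grow-and-retry pattern: the first attempt
 * sizes the buffer for 40 capability elements; if firmware answers
 * I40E_AQ_RC_ENOMEM it also reports the size it actually wanted in
 * data_size, so the loop reallocates to exactly that size and tries again.
 */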
static int i40e_vsi_clear(struct i40e_vsi *vsi);
/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};
		int i;

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}
/**
 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
 * @vsi: PF main vsi
 * @seid: seid of main or channel VSIs
 *
 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
 * existed before reset
 **/
static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
{
	struct i40e_cloud_filter *cfilter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	i40e_status ret;

	/* Add cloud filters back if they exist */
	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
				  cloud_node) {
		if (cfilter->seid != seid)
			continue;

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								true);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);

		if (ret) {
			dev_dbg(&pf->pdev->dev,
				"Failed to rebuild cloud filter, err %s aq_err %s\n",
				i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	return 0;
}
/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Rebuilds channel VSIs if they existed before reset
 **/
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		/* Reconfigure TX queues using QTX_CTL register */
		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to configure TX rings for channel %u\n",
				 ch->seid);
			return ret;
		}
		/* update 'next_base_queue' */
		vsi->next_base_queue = vsi->next_base_queue +
					ch->num_queue_pairs;
		if (ch->max_tx_rate) {
			u64 credits = ch->max_tx_rate;

			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

			do_div(credits, I40E_BW_CREDIT_DIVISOR);
			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				credits,
				ch->seid);
		}
		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	/* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
	if (!lock_acquired)
		rtnl_lock();
	i40e_pf_quiesce_all_vsi(pf);
	if (!lock_acquired)
		rtnl_unlock();

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}
/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 */
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
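/* NVM OEM version block layout as read above, for reference (word offsets
 * relative to the pointer found at 0x1B in the shadow RAM):
 *
 *	+0x00  length        (must be >= 3 words)
 *	+0x01  capabilities  (low 4 bits must be 0)
 *	+0x02  gen/snap
 *	+0x03  release
 *
 * The result is packed as (gen_snap << I40E_OEM_SNAP_SHIFT) | release.
 */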
/**
 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}
/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_DOWN, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

	/* Enable FW to write a default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
	/* do basic switch setup */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 credits = 0;

		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuilt by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
}
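/* Rough ordering contract implemented above: AdminQ first (nothing else can
 * talk to firmware without it), then capabilities and HMC, then the switch
 * model (VEBs before their VSIs), and only once traffic-bearing state is
 * back do we unquiesce VSIs, reset VFs, and report the driver version.
 */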
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;

	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}
/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
static const char *i40e_tunnel_name(struct i40e_udp_port_config *port)
{
	switch (port->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	default:
		return "unknown";
	}
}
/**
 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters(struct i40e_pf *pf)
{
	int i;

	/* loop through and set pending bit for all active UDP filters */
	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port)
			pf->pending_udp_bitmap |= BIT_ULL(i);
	}

	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}
/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u16 port;
	int i;

	if (!(pf->flags & I40E_FLAG_UDP_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_UDP_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			pf->pending_udp_bitmap &= ~BIT_ULL(i);
			port = pf->udp_ports[i].port;
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
							pf->udp_ports[i].type,
							NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s %s port %d, index %d failed, err %s aq_err %s\n",
					 i40e_tunnel_name(&pf->udp_ports[i]),
					 port ? "add" : "delete",
					 port, i,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				pf->udp_ports[i].port = 0;
			}
		}
	}
}
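/* Semantics of the sync above: a set bit in pf->pending_udp_bitmap means
 * slot i changed; a nonzero pf->udp_ports[i].port means "program this port"
 * (add) while zero means "free the HW index" (delete). On AQ failure the
 * slot is zeroed so we do not keep retrying a port firmware rejected.
 */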
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
	i40e_sync_filters_subtask(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	if (pf->flags & I40E_FLAG_CLIENT_RESET) {
		/* Client subtask will reopen next time through. */
		i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
		pf->flags &= ~I40E_FLAG_CLIENT_RESET;
	} else {
		i40e_client_subtask(pf);
		if (pf->flags & I40E_FLAG_CLIENT_L2_CHANGE) {
			i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
			pf->flags &= ~I40E_FLAG_CLIENT_L2_CHANGE;
		}
	}
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}
/**
 * i40e_service_timer - timer callback
 * @t: pointer to the timer_list embedded in the PF struct
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}
/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: the VSI being configured
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
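/* Memory layout produced above - one allocation, up to three logical arrays
 * of ring pointers, each alloc_queue_pairs long:
 *
 *	vsi->tx_rings -> [ Tx 0..n-1 | XDP Tx 0..n-1 (XDP only) | Rx 0..n-1 ]
 *
 * vsi->xdp_rings and vsi->rx_rings are just offsets into the same block,
 * which is why i40e_vsi_free_arrays() frees only vsi->tx_rings.
 */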
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI being cleaned
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}
/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 */
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);

free_vsi:
	kfree(vsi);

	return 0;
}
/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
			if (vsi->xdp_rings)
				vsi->xdp_rings[i] = NULL;
		}
	}
}
/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->tx_itr_setting = pf->tx_itr_default;
		vsi->tx_rings[i] = ring++;

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->tx_itr_setting = pf->tx_itr_default;
		vsi->xdp_rings[i] = ring++;

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		ring->rx_itr_setting = pf->rx_itr_default;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve some vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			/* if we're short on vectors for what's desired, we
			 * limit the queues per vmdq.  If this is still more
			 * than are available, the user will need to change
			 * the number of queues/vectors used by the PF later
			 * with the ethtool channels command
			 */
			if (vmdq_vecs < vmdq_vecs_wanted)
				pf->num_vmdq_qps = 1;
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
			 v_actual, v_budget);
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
						 iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
						  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
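/* A worked example of the budgeting above (numbers are illustrative, not
 * from any particular part): with 64 vectors from func_caps and 16 online
 * CPUs, 1 vector goes to misc, the LAN pool is first capped at
 * min(16, 63/2) = 16, then FD-SB takes 1 and VMDq/iWARP draw from the
 * remainder; anything still unclaimed flows back into the LAN pool up to
 * the CPU count, so num_lan_msix never exceeds num_online_cpus().
 */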
/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 * @cpu: cpu to be used on affinity_mask
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	q_vector->tx.latency_range = I40E_LOW_LATENCY;

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors, current_cpu;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	current_cpu = cpumask_first(cpu_online_mask);

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
		if (err)
			goto err_out;
		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
		if (unlikely(current_cpu >= nr_cpu_ids))
			current_cpu = cpumask_first(cpu_online_mask);
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}

/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile)
		return -ENOMEM;

	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}

/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device. This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 */
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}

/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}

/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 */
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}

/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}

/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}

/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 */
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}
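
/* i40e_config_rss() above and i40e_get_rss() below share the same dispatch:
 * parts that advertise I40E_HW_RSS_AQ_CAPABLE (the X722 family, per
 * i40e_sw_init() later in this file) program RSS through AdminQ commands,
 * while everything else falls back to direct register access.
 */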

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 */
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 */
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}

/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;
		/* If the firmware does something weird during VSI init, we
		 * could end up with zero TCs. Check for that to avoid
		 * divide-by-zero. It probably won't pass traffic, but it also
		 * won't panic.
		 */
		qcount = vsi->num_queue_pairs /
			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}

/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf, true);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}

/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
	i40e_status status;
	bool min_valid, max_valid;
	u32 max_bw, min_bw;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->min_bw = min_bw;
		if (max_valid)
			pf->max_bw = max_bw;
	}

	return status;
}

/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	i40e_status status;

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}

/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, 0, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}

/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_partition_bw_setting(pf)) {
			dev_warn(&pf->pdev->dev,
				 "Could not get partition bw settings\n");
		} else {
			dev_info(&pf->pdev->dev,
				 "Partition BW Min = %8.8x, Max = %8.8x\n",
				 pf->min_bw, pf->max_bw);

			/* nudge the Tx scheduler */
			i40e_set_partition_bw_setting(pf);
		}
	}

	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
				    I40E_HW_128_QP_RSS_CAPABLE |
				    I40E_HW_ATR_EVICT_CAPABLE |
				    I40E_HW_WB_ON_ITR_CAPABLE |
				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
				    I40E_HW_NO_PCI_LINK_CHECK |
				    I40E_HW_USE_SET_LLDP_MIB |
				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
				    I40E_HW_PTP_L4_CAPABLE |
				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);

#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
		    I40E_FDEVICT_PCTYPE_DEFAULT) {
			dev_warn(&pf->pdev->dev,
				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
		}
	} else if ((pf->hw.aq.api_maj_ver > 1) ||
		   ((pf->hw.aq.api_maj_ver == 1) &&
		    (pf->hw.aq.api_min_ver > 4))) {
		/* Supported in FW API version higher than 1.4 */
		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
	}

	/* Enable HW ATR eviction if possible */
	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;

	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	     (pf->hw.aq.fw_maj_ver < 4))) {
		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
		/* No DCB support for FW < v4.33 */
		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
	}

	/* Disable FW LLDP if FW < v4.3 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	     (pf->hw.aq.fw_maj_ver < 4)))
		pf->hw_features |= I40E_HW_STOP_FW_LLDP;

	/* Use the FW Set LLDP MIB API if FW > v4.40 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
	     (pf->hw.aq.fw_maj_ver >= 5)))
		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;

	/* Enable PTP L4 if FW > v6.0 */
	if (pf->hw.mac.type == I40E_MAC_XL710 &&
	    pf->hw.aq.fw_maj_ver >= 6)
		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;

	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
		pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP just like MISC.*/
		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
	}

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}

/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		/* enable FD_SB only if there is MSI-X vector and no cloud
		 * filters exist
		 */
		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~(I40E_FLAG_FD_SB_ENABLED |
			       I40E_FLAG_FD_SB_AUTO_DISABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		/* if ATR was auto disabled it can be re-enabled. */
		if (pf->flags & I40E_FLAG_FD_ATR_AUTO_DISABLED) {
			pf->flags &= ~I40E_FLAG_FD_ATR_AUTO_DISABLED;
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		}
	}
	return need_reset;
}

/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/
static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	if (vsi->type == I40E_VSI_MAIN) {
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			wr32(hw, I40E_PFQF_HLUT(i), 0);
	} else if (vsi->type == I40E_VSI_SRIOV) {
		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
	} else {
		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
	}
}

/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
		i40e_pf_config_rss(pf);
	else if (!(features & NETIF_F_RXHASH) &&
		 netdev->features & NETIF_F_RXHASH)
		i40e_clear_rss_lut(vsi);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
		dev_err(&pf->pdev->dev,
			"Offloaded tc filters active, can't turn hw_tc_offload off");
		return -EINVAL;
	}

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	return 0;
}

/**
 * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port == port)
			return i;
	}

	return i;
}
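
/* Calling i40e_get_udp_port_idx(pf, 0) doubles as a free-slot search,
 * since an unused entry has port == 0; i40e_udp_tunnel_add() below relies
 * on this to find space for a new port.
 */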

/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
static void i40e_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	u8 next_idx;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "port %d already offloaded\n", port);
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
			    port);
		return;
	}

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
			return;
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
		break;
	default:
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->udp_ports[next_idx].port = port;
	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
}

/**
 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
static void i40e_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
		goto not_found;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
			goto not_found;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
			goto not_found;
		break;
	default:
		goto not_found;
	}

	/* if port exists, set it to 0 (mark for deletion)
	 * and make it pending
	 */
	pf->udp_ports[idx].port = 0;
	pf->pending_udp_bitmap |= BIT_ULL(idx);
	pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;

	return;
not_found:
	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
		    port);
}

static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}

/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}

/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 *
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
			break;
		}
	}

	return 0;
}

/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating,
 * i.e. VEB or VEPA.
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       0, 0, nlflags, filter_mask, NULL);
}

/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame.  We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes.  If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
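
/* The length checks above are mask tricks: e.g. "len & ~(63 * 2)" is
 * nonzero whenever len is odd or >= 128, matching a MACLEN field that
 * counts 2-byte words with a 6-bit maximum; the 127-word and 127-dword
 * variants work the same way for the 7-bit header-length fields.
 */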

/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: VSI to be changed
 * @prog: XDP program
 **/
static int i40e_xdp_setup(struct i40e_vsi *vsi,
			  struct bpf_prog *prog)
{
	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct i40e_pf *pf = vsi->back;
	struct bpf_prog *old_prog;
	bool need_reset;
	int i;

	/* Don't allow frames that span over multiple buffers */
	if (frame_size > vsi->rx_buf_len)
		return -EINVAL;

	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
		return 0;

	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);

	if (need_reset)
		i40e_prep_for_reset(pf, true);

	old_prog = xchg(&vsi->xdp_prog, prog);

	if (need_reset)
		i40e_reset_and_rebuild(pf, true, true);

	for (i = 0; i < vsi->num_queue_pairs; i++)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}
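
/* For reference, with a default 1500-byte MTU the frame_size checked at
 * the top of i40e_xdp_setup() is 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN)
 * + 4 (VLAN_HLEN) = 1522 bytes, which must fit in a single Rx buffer
 * because an XDP program only ever sees one contiguous buffer.
 */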

/**
 * i40e_xdp - implements ndo_bpf for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/
static int i40e_xdp(struct net_device *dev,
		    struct netdev_bpf *xdp)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return i40e_xdp_setup(vsi, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_attached = i40e_enabled_xdp_vsi(vsi);
		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
	.ndo_bpf		= i40e_xdp,
};

/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 broadcast[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES		|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_GSO_GRE		|
			  NETIF_F_GSO_GRE_CSUM		|
			  NETIF_F_GSO_PARTIAL		|
			  NETIF_F_GSO_UDP_TUNNEL	|
			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
			  NETIF_F_GSO_IPXIP4		|
			  NETIF_F_GSO_IPXIP6		|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->hw_enc_features |= hw_enc_features;

	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;

	hw_features = hw_enc_features		|
		      NETIF_F_HW_VLAN_CTAG_TX	|
		      NETIF_F_HW_VLAN_CTAG_RX;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary for two reasons. First,
		 * some older NVM configurations load a default MAC-VLAN
		 * filter that will accept any tagged packet, and we want to
		 * replace this with a normal filter. Additionally, it is
		 * possible our MAC address was provided by the platform using
		 * Open Firmware or similar.
		 *
		 * Thus, we need to remove the default filter and install one
		 * specific to the MAC address.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	} else {
		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
		 * the end, which is 4 bytes long, so force truncation of the
		 * original name by IFNAMSIZ - 4
		 */
		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
			 IFNAMSIZ - 4,
			 pf->vsi[pf->lan_vsi]->netdev->name);
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Add the broadcast filter so that we initially will receive
	 * broadcast packets. Note that when a new VLAN is first added the
	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
	 * specific filters as part of transitioning into "vlan" operation.
	 * When more VLANs are added, the driver will copy each existing MAC
	 * filter and add it for the new VLAN.
	 *
	 * Broadcast filters are handled specially by
	 * i40e_sync_filters_subtask, as the driver must set the broadcast
	 * promiscuous bit instead of adding this directly as a MAC/VLAN
	 * filter. The subtask will update the correct broadcast promiscuous
	 * bits as VLANs become active or inactive.
	 */
	eth_broadcast_addr(broadcast);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_add_mac_filter(vsi, broadcast);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	/* MTU range: 68 - 9706 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;

	return 0;
}

/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}

/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
		return 0;
	} else {
		/* Uplink is a bridge in VEB mode */
		return 1;
	}

	/* VEPA is now default bridge, so return 0 */
	return 0;
}

/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* Source pruning is enabled by default, so the flag is
		 * negative logic - if it's set, we need to fiddle with
		 * the VSI to disable source pruning.
		 */
		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			ctxt.info.valid_sections |=
				     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
		}

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				/* Single TC condition is not fatal,
				 * message and continue
				 */
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
			ctxt.info.queueing_opt_flags |=
				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_IWARP:
		/* send down message to iWARP */
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	vsi->active_filters = 0;
	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* If macvlan filters already exist, force them to get loaded */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		f->state = I40E_FILTER_NEW;
		f_count++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}

/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n, bkt;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* clear the sync flag on all filters */
	if (vsi->netdev) {
		__dev_uc_unsync(vsi->netdev, NULL);
		__dev_mc_unsync(vsi->netdev, NULL);
	}

	/* make sure any remaining filters are marked for deletion */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}

/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}

/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	u16 alloc_queue_pairs;
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a VSI
 * to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configure VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous case
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
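
/* Illustrative usage (sketch only, not part of this file): creating a VMDq
 * VSI whose uplink is the LAN VSI's seid. Per the comment at the top of
 * i40e_vsi_setup(), passing a VSI seid that owns no VEB causes a VEB to be
 * created on demand, with that VSI becoming the owner.
 */
#if 0
	struct i40e_vsi *vmdq_vsi;

	vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
				  pf->vsi[pf->lan_vsi]->seid, 0);
	if (!vmdq_vsi)
		dev_info(&pf->pdev->dev, "VMDq VSI setup failed\n");
#endif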
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
	}

out:
	return ret;
}
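
/* Worked example (comment only): tc_bw_max packs one 4-bit max-quanta field
 * per traffic class. With tc_bw_max = 0x87654321, TC0 gets
 * (0x87654321 >> 0) & 0x7 = 1, TC1 gets (0x87654321 >> 4) & 0x7 = 2, and so
 * on; the 0x7 mask keeps only the low three bits of each nibble.
 */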
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
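
/* Design note: if the bandwidth query fails, i40e_add_veb() deletes the
 * element it just created, so a VEB is only left instantiated in the HW
 * switch when the software view of it is complete.
 */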
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
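
/* Illustrative calls (sketch only, arguments hypothetical): the seid check
 * above means either both seids are non-zero, or both are zero for a
 * floating VEB.
 */
#if 0
	/* VEB uplinked to the MAC and seeded with the LAN VSI */
	veb = i40e_veb_setup(pf, 0, pf->mac_seid,
			     pf->vsi[pf->lan_vsi]->seid, 0x1);
	/* floating VEB: no uplink and no initial VSI */
	veb = i40e_veb_setup(pf, 0, 0, 0, 0x1);
#endif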
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
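
/* Design note: i40e_aq_get_switch_config() returns at most one
 * I40E_AQ_LARGE_BUF worth of elements per call and hands back a next_seid
 * cookie; the do/while loop above re-issues the command with that cookie
 * until the firmware reports next_seid == 0.
 */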
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */
	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big share of the number of queues.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
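
/* Worked example (comment only, numbers hypothetical): with
 * num_tx_qp = 64 and rss_size_max = 8 on a 16-CPU system with 32 MSI-X
 * vectors, q_max = min(max(8, 16), 64, 32) = 16 LAN qps, leaving 48.
 * FD sideband then reserves one queue (47 left), four requested VFs at
 * four qps each consume 16 (31 left), and VMDq draws from the remainder;
 * whatever is unclaimed ends up in pf->queues_left.
 */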
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
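
/* Design note: REMAIN(i) passes each snprintf() only the space left in buf
 * after i bytes. Because snprintf() returns the length it would have
 * written, the final WARN_ON(i > INFO_STRING_LEN) catches a feature string
 * that no longer fits the buffer.
 */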
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
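
/* Design note: eth_platform_get_mac_address() returns 0 only when a
 * platform source (device tree / arch-specific fallback) supplied an
 * address, so the firmware-stored address fetched by i40e_get_mac_addr()
 * is strictly the fallback path here.
 */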
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_mem_regions(pdev, i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, pf->state);

	hw = &pf->hw;
	hw->back = pf;

	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	hw->bus.bus_id = pdev->bus->number;
	pf->instance = pfs_found;

	/* Select something other than the 802.1ad ethertype for the
	 * switch to use internally and drop on ingress.
	 */
	hw->switch_tag = 0xffff;
	hw->first_tag = ETH_P_8021AD;
	hw->second_tag = ETH_P_8021Q;

	INIT_LIST_HEAD(&pf->l3_flex_pit_list);
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	pf->msg_enable = netif_msg_init(debug,
					NETIF_MSG_DRV |
					NETIF_MSG_PROBE |
					NETIF_MSG_LINK);
	if (debug < -1)
		pf->hw.debug_mask = debug;

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	err = i40e_init_adminq(hw);
	if (err) {
		if (err == I40E_ERR_FIRMWARE_API_VERSION)
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		else
			dev_info(&pdev->dev,
				 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");

		goto err_pf_reset;
	}
	i40e_get_oem_version(hw);

	/* provide nvm, fw, api versions */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw))
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4)
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (pf->hw_features & I40E_HW_STOP_FW_LLDP) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	/* allow a platform config to override the HW addr */
	i40e_get_platform_mac_addr(pdev, pf);

	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->hw_features |= I40E_HW_PORT_ID_VALID;

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);

	/* Enable FW to write default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	timer_setup(&pf->service_timer, i40e_service_timer, 0);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if (BIT(hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
			  GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}
	INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, pf->state)) {
		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		pf->iwarp_base_vector = i40e_get_lump(pf, pf->irq_pile,
						      pf->num_iwarp_msix,
						      I40E_IWARP_IRQ_PILE_ID);
		if (pf->iwarp_base_vector < 0) {
			dev_info(&pdev->dev,
				 "failed to get tracking for %d vectors for IWARP err=%d\n",
				 pf->num_iwarp_msix, pf->iwarp_base_vector);
			pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
		}
	}

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	/* add this PF to client device list and launch a client service task */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		err = i40e_lan_add_device(pf);
		if (err)
			dev_info(&pdev->dev, "Failed to add PF to client API service list: %d\n",
				 err);
	}

#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->hw_features & I40E_HW_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that is should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the channel state
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;
	int err;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			 err);
		/* non-fatal, continue */
	}

	return result;
}
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}
/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}
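
/* Design note: the two i40e_aq_mac_address_write() calls above are ordered
 * deliberately. As the in-function comment says, firmware expects a
 * LAA_WOL/LAA_ONLY write first and only then accepts the UPDATE_MC_MAG
 * write that actually arms multicast magic-packet wake.
 */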
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf, true);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	i40e_clear_interrupt_scheme(pf);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	return 0;
}
/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err) {
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);
	}

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, false);

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);

static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit.  We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);

/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
}
module_exit(i40e_exit_module);