// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/bpf.h>

/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
/* All i40e tracepoints are defined by the include below, which
 * must be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "i40e_trace.h"
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 2
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
/* forward declarations */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type);
/* i40e_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id i40e_pci_tbl[] = {
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_B), 0},
	{PCI_VDEVICE(INTEL, I40E_DEV_ID_25G_SFP28), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
#define I40E_MAX_VF_COUNT 128
static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
static struct workqueue_struct *i40e_wq;
/**
 * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
			    u64 size, u32 alignment)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	mem->size = ALIGN(size, alignment);
	mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
				      &mem->pa, GFP_KERNEL);
	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
{
	struct i40e_pf *pf = (struct i40e_pf *)hw->back;

	dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
	mem->va = NULL;
	mem->pa = 0;
	mem->size = 0;

	return 0;
}
/**
 * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
			     u32 size)
{
	mem->size = size;
	mem->va = kzalloc(size, GFP_KERNEL);

	if (!mem->va)
		return -ENOMEM;

	return 0;
}
/**
 * i40e_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
{
	/* it's ok to kfree a NULL pointer */
	kfree(mem->va);
	mem->va = NULL;
	mem->size = 0;

	return 0;
}
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%s needed=%d id=0x%04x\n",
			 pile ? "<valid>" : "<null>", needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
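/* Illustrative example of the lump allocator: with num_entries = 8,
 * list = { V|x, V|x, 0, 0, 0, V|y, 0, 0 } (V = I40E_PILE_VALID_BIT) and
 * needed = 3, the search starting at search_hint = 0 skips the two entries
 * owned by 'x', finds indices 2-4 free, stamps them with
 * (id | I40E_PILE_VALID_BIT) and returns base index 2.
 */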
/**
 * i40e_put_lump - return a lump of generic resource
 * @pile: the pile of resource to search
 * @index: the base item index
 * @id: the owner id of the items assigned
 *
 * Returns the count of items in the lump
 **/
static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
	int valid_id = (id | I40E_PILE_VALID_BIT);
	int count = 0;
	int i;

	if (!pile || index >= pile->num_entries)
		return -EINVAL;

	for (i = index;
	     i < pile->num_entries && pile->list[i] == valid_id;
	     i++) {
		pile->list[i] = 0;
		count++;
	}

	if (count && index < pile->search_hint)
		pile->search_hint = index;

	return count;
}
/**
 * i40e_find_vsi_from_id - searches for the vsi with the given id
 * @pf: the pf structure to search for the vsi
 * @id: id of the vsi it is searching for
 **/
struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
{
	int i;

	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->id == id))
			return pf->vsi[i];

	return NULL;
}
/**
 * i40e_service_event_schedule - Schedule the service task to wake up
 * @pf: board private structure
 *
 * If not already scheduled, this puts the task into the work queue
 **/
void i40e_service_event_schedule(struct i40e_pf *pf)
{
	if (!test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		queue_work(i40e_wq, &pf->service_task);
}
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
static void i40e_tx_timeout(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						 tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	return &vsi->net_stats;
}
/**
 * i40e_get_netdev_stats_struct_tx - populate stats from a Tx ring
 * @ring: Tx ring to get statistics from
 * @stats: statistics entry to be updated
 **/
static void i40e_get_netdev_stats_struct_tx(struct i40e_ring *ring,
					    struct rtnl_link_stats64 *stats)
{
	u64 bytes, packets;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes   = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

	stats->tx_packets += packets;
	stats->tx_bytes   += bytes;
}
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: data structure to store statistics
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
static void i40e_get_netdev_stats_struct(struct net_device *netdev,
					 struct rtnl_link_stats64 *stats)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	struct i40e_ring *ring;
	int i;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (!vsi->tx_rings)
		return;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		ring = READ_ONCE(vsi->tx_rings[i]);
		if (!ring)
			continue;
		i40e_get_netdev_stats_struct_tx(ring, stats);

		if (i40e_enabled_xdp_vsi(vsi)) {
			ring++;
			i40e_get_netdev_stats_struct_tx(ring, stats);
		}

		ring++;
		do {
			start   = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes   = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors	= vsi_stats->rx_length_errors;
}
/**
 * i40e_vsi_reset_stats - Resets all stats of the given vsi
 * @vsi: the VSI to have its stats reset
 **/
void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
{
	struct rtnl_link_stats64 *ns;
	int i;

	if (!vsi)
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	memset(ns, 0, sizeof(*ns));
	memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
	memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
	memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
	if (vsi->rx_rings && vsi->rx_rings[0]) {
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			memset(&vsi->rx_rings[i]->stats, 0,
			       sizeof(vsi->rx_rings[i]->stats));
			memset(&vsi->rx_rings[i]->rx_stats, 0,
			       sizeof(vsi->rx_rings[i]->rx_stats));
			memset(&vsi->tx_rings[i]->stats, 0,
			       sizeof(vsi->tx_rings[i]->stats));
			memset(&vsi->tx_rings[i]->tx_stats, 0,
			       sizeof(vsi->tx_rings[i]->tx_stats));
		}
	}
	vsi->stat_offsets_loaded = false;
}
/**
 * i40e_pf_reset_stats - Reset all of the stats for the given PF
 * @pf: the PF to be reset
 **/
void i40e_pf_reset_stats(struct i40e_pf *pf)
{
	int i;

	memset(&pf->stats, 0, sizeof(pf->stats));
	memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
	pf->stat_offsets_loaded = false;

	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i]) {
			memset(&pf->veb[i]->stats, 0,
			       sizeof(pf->veb[i]->stats));
			memset(&pf->veb[i]->stats_offsets, 0,
			       sizeof(pf->veb[i]->stats_offsets));
			pf->veb[i]->stat_offsets_loaded = false;
		}
	}
	pf->hw_csum_rx_error = 0;
}
/**
 * i40e_stat_update48 - read and update a 48 bit stat from the chip
 * @hw: ptr to the hardware info
 * @hireg: the high 32 bit reg to read
 * @loreg: the low 32 bit reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 *
 * Since the device stats are not reset at PFReset, they likely will not
 * be zeroed when the driver starts.  We'll save the first values read
 * and use them as offsets to be subtracted from the raw values in order
 * to report stats that count from zero.  In the process, we also manage
 * the potential roll-over.
 **/
static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u64 new_data;

	if (hw->device_id == I40E_DEV_ID_QEMU) {
		new_data = rd32(hw, loreg);
		new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
	} else {
		new_data = rd64(hw, loreg);
	}
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = new_data - *offset;
	else
		*stat = (new_data + BIT_ULL(48)) - *offset;
	*stat &= 0xFFFFFFFFFFFFULL;
}
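/* Worked example of the roll-over handling: if the first read latches
 * *offset = 0xFFFFFFFFFFF0 and a later read returns 0x10 after the 48-bit
 * counter wraps, new_data < *offset, so the reported value is
 * (0x10 + BIT_ULL(48)) - 0xFFFFFFFFFFF0 = 0x20, masked back to 48 bits.
 */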
/**
 * i40e_stat_update32 - read and update a 32 bit stat from the chip
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read
 * @offset_loaded: has the initial offset been loaded yet
 * @offset: ptr to current offset value
 * @stat: ptr to the stat
 **/
static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
			       bool offset_loaded, u64 *offset, u64 *stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);
	if (!offset_loaded)
		*offset = new_data;
	if (likely(new_data >= *offset))
		*stat = (u32)(new_data - *offset);
	else
		*stat = (u32)((new_data + BIT_ULL(32)) - *offset);
}
/**
 * i40e_stat_update_and_clear32 - read and clear hw reg, update a 32 bit stat
 * @hw: ptr to the hardware info
 * @reg: the hw reg to read and clear
 * @stat: ptr to the stat
 **/
static void i40e_stat_update_and_clear32(struct i40e_hw *hw, u32 reg, u64 *stat)
{
	u32 new_data = rd32(hw, reg);

	wr32(hw, reg, 1); /* must write a nonzero value to clear register */
	*stat += new_data;
}
/**
 * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
 * @vsi: the VSI to be updated
 **/
void i40e_update_eth_stats(struct i40e_vsi *vsi)
{
	int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */

	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_errors, &es->tx_errors);
	i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_discards, &es->rx_discards);
	i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unknown_protocol, &es->rx_unknown_protocol);

	i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
			   I40E_GLV_GORCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
			   I40E_GLV_UPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
			   I40E_GLV_MPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
			   I40E_GLV_BPRCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
			   I40E_GLV_GOTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
			   I40E_GLV_UPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
			   I40E_GLV_MPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
			   I40E_GLV_BPTCL(stat_idx),
			   vsi->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	vsi->stat_offsets_loaded = true;
}
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	if (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
	rx_page = 0;
	rx_buf = 0;
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = READ_ONCE(vsi->tx_rings[q]);

		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;
		tx_force_wb += p->tx_stats.tx_force_wb;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->tx_force_wb = tx_force_wb;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
/**
 * i40e_update_pf_stats - Update the PF statistics counters.
 * @pf: the PF to be updated
 **/
static void i40e_update_pf_stats(struct i40e_pf *pf)
{
	struct i40e_hw_port_stats *osd = &pf->stats_offsets;
	struct i40e_hw_port_stats *nsd = &pf->stats;
	struct i40e_hw *hw = &pf->hw;
	u32 val;
	int i;

	i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
			   I40E_GLPRT_GORCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
	i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
			   I40E_GLPRT_GOTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
	i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_discards,
			   &nsd->eth.rx_discards);
	i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
			   I40E_GLPRT_UPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_unicast,
			   &nsd->eth.rx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
			   I40E_GLPRT_MPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_multicast,
			   &nsd->eth.rx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
			   I40E_GLPRT_BPRCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.rx_broadcast,
			   &nsd->eth.rx_broadcast);
	i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
			   I40E_GLPRT_UPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_unicast,
			   &nsd->eth.tx_unicast);
	i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
			   I40E_GLPRT_MPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_multicast,
			   &nsd->eth.tx_multicast);
	i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
			   I40E_GLPRT_BPTCL(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->eth.tx_broadcast,
			   &nsd->eth.tx_broadcast);

	i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_dropped_link_down,
			   &nsd->tx_dropped_link_down);

	i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->crc_errors, &nsd->crc_errors);

	i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->illegal_bytes, &nsd->illegal_bytes);

	i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_local_faults,
			   &nsd->mac_local_faults);
	i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->mac_remote_faults,
			   &nsd->mac_remote_faults);

	i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_length_errors,
			   &nsd->rx_length_errors);

	i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_rx, &nsd->link_xon_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xon_tx, &nsd->link_xon_tx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_rx, &nsd->link_xoff_rx);
	i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->link_xoff_tx, &nsd->link_xoff_tx);

	for (i = 0; i < 8; i++) {
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_rx[i],
				   &nsd->priority_xoff_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_rx[i],
				   &nsd->priority_xon_rx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_tx[i],
				   &nsd->priority_xon_tx[i]);
		i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xoff_tx[i],
				   &nsd->priority_xoff_tx[i]);
		i40e_stat_update32(hw,
				   I40E_GLPRT_RXON2OFFCNT(hw->port, i),
				   pf->stat_offsets_loaded,
				   &osd->priority_xon_2_xoff[i],
				   &nsd->priority_xon_2_xoff[i]);
	}

	i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
			   I40E_GLPRT_PRC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_64, &nsd->rx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
			   I40E_GLPRT_PRC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_127, &nsd->rx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
			   I40E_GLPRT_PRC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_255, &nsd->rx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
			   I40E_GLPRT_PRC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_511, &nsd->rx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
			   I40E_GLPRT_PRC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1023, &nsd->rx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
			   I40E_GLPRT_PRC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_1522, &nsd->rx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
			   I40E_GLPRT_PRC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_size_big, &nsd->rx_size_big);

	i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
			   I40E_GLPRT_PTC64L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_64, &nsd->tx_size_64);
	i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
			   I40E_GLPRT_PTC127L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_127, &nsd->tx_size_127);
	i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
			   I40E_GLPRT_PTC255L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_255, &nsd->tx_size_255);
	i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
			   I40E_GLPRT_PTC511L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_511, &nsd->tx_size_511);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
			   I40E_GLPRT_PTC1023L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1023, &nsd->tx_size_1023);
	i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
			   I40E_GLPRT_PTC1522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_1522, &nsd->tx_size_1522);
	i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
			   I40E_GLPRT_PTC9522L(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->tx_size_big, &nsd->tx_size_big);

	i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_undersize, &nsd->rx_undersize);
	i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_fragments, &nsd->rx_fragments);
	i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_oversize, &nsd->rx_oversize);
	i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
			   pf->stat_offsets_loaded,
			   &osd->rx_jabber, &nsd->rx_jabber);

	/* FDIR stats */
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(hw->pf_id)),
			&nsd->fd_sb_match);
	i40e_stat_update_and_clear32(hw,
			I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(hw->pf_id)),
			&nsd->fd_atr_tunnel_match);

	val = rd32(hw, I40E_PRTPM_EEE_STAT);
	nsd->tx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
	nsd->rx_lpi_status =
		       (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
			I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
	i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
			   pf->stat_offsets_loaded,
			   &osd->tx_lpi_count, &nsd->tx_lpi_count);
	i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
			   pf->stat_offsets_loaded,
			   &osd->rx_lpi_count, &nsd->rx_lpi_count);

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
	    !test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		nsd->fd_sb_status = true;
	else
		nsd->fd_sb_status = false;

	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
	    !test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
		nsd->fd_atr_status = true;
	else
		nsd->fd_atr_status = false;

	pf->stat_offsets_loaded = true;
}
/**
 * i40e_update_stats - Update the various statistics counters.
 * @vsi: the VSI to be updated
 *
 * Update the various stats for this VSI and its related entities.
 **/
void i40e_update_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_update_pf_stats(pf);

	i40e_update_vsi_stats(vsi);
}
/**
 * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL
 **/
static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
						const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)) &&
		    (vlan == f->vlan))
			return f;
	}
	return NULL;
}
/**
 * i40e_find_mac - Find a mac addr in the macvlan filters list
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address we are searching for
 *
 * Returns the first filter with the provided MAC address or NULL if
 * MAC address was not found
 **/
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	key = i40e_addr_to_hkey(macaddr);
	hash_for_each_possible(vsi->mac_filter_hash, f, hlist, key) {
		if ((ether_addr_equal(macaddr, f->macaddr)))
			return f;
	}
	return NULL;
}
/**
 * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
 * @vsi: the VSI to be searched
 *
 * Returns true if VSI is in vlan mode or false otherwise
 **/
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
{
	/* If we have a PVID, always operate in VLAN mode */
	if (vsi->info.pvid)
		return true;

	/* We need to operate in VLAN mode whenever we have any filters with
	 * a VLAN other than I40E_VLAN_ALL. We could check the table each
	 * time, incurring search cost repeatedly. However, we can notice two
	 * things:
	 *
	 * 1) the only place where we can gain a VLAN filter is in
	 *    i40e_add_filter.
	 *
	 * 2) the only place where filters are actually removed is in
	 *    i40e_sync_filters_subtask.
	 *
	 * Thus, we can simply use a boolean value, has_vlan_filters which we
	 * will set to true when we add a VLAN filter in i40e_add_filter. Then
	 * we have to perform the full search after deleting filters in
	 * i40e_sync_filters_subtask, but we already have to search
	 * filters here and can perform the check at the same time. This
	 * results in avoiding embedding a loop for VLAN mode inside another
	 * loop over all the filters, and should maintain correctness as noted
	 * above.
	 */
	return vsi->has_vlan_filter;
}
/**
 * i40e_correct_mac_vlan_filters - Correct non-VLAN filters if necessary
 * @vsi: the VSI to configure
 * @tmp_add_list: list of filters ready to be added
 * @tmp_del_list: list of filters ready to be deleted
 * @vlan_filters: the number of active VLAN filters
 *
 * Update VLAN=0 and VLAN=-1 (I40E_VLAN_ANY) filters properly so that they
 * behave as expected. If we have any active VLAN filters remaining or about
 * to be added then we need to update non-VLAN filters to be marked as VLAN=0
 * so that they only match against untagged traffic. If we no longer have any
 * active VLAN filters, we need to make all non-VLAN filters marked as VLAN=-1
 * so that they match against both tagged and untagged traffic. In this way,
 * we ensure that we correctly receive the desired traffic. This ensures that
 * when we have an active VLAN we will receive only untagged traffic and
 * traffic matching active VLANs. If we have no active VLANs then we will
 * operate in non-VLAN mode and receive all traffic, tagged or untagged.
 *
 * Finally, in a similar fashion, this function also corrects filters when
 * there is an active PVID assigned to this VSI.
 *
 * In case of memory allocation failure return -ENOMEM. Otherwise, return 0.
 *
 * This function is only expected to be called from within
 * i40e_sync_vsi_filters.
 *
 * NOTE: This function expects to be called while under the
 * mac_filter_hash_lock
 **/
static int i40e_correct_mac_vlan_filters(struct i40e_vsi *vsi,
					 struct hlist_head *tmp_add_list,
					 struct hlist_head *tmp_del_list,
					 int vlan_filters)
{
	s16 pvid = le16_to_cpu(vsi->info.pvid);
	struct i40e_mac_filter *f, *add_head;
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;
	int bkt, new_vlan;

	/* To determine if a particular filter needs to be replaced we
	 * have the three following conditions:
	 *
	 * a) if we have a PVID assigned, then all filters which are
	 *    not marked as VLAN=PVID must be replaced with filters that
	 *    are.
	 * b) otherwise, if we have any active VLANS, all filters
	 *    which are marked as VLAN=-1 must be replaced with
	 *    filters marked as VLAN=0
	 * c) finally, if we do not have any active VLANS, all filters
	 *    which are marked as VLAN=0 must be replaced with filters
	 *    marked as VLAN=-1
	 */

	/* Update the filters about to be added in place */
	hlist_for_each_entry(new, tmp_add_list, hlist) {
		if (pvid && new->f->vlan != pvid)
			new->f->vlan = pvid;
		else if (vlan_filters && new->f->vlan == I40E_VLAN_ANY)
			new->f->vlan = 0;
		else if (!vlan_filters && new->f->vlan == 0)
			new->f->vlan = I40E_VLAN_ANY;
	}

	/* Update the remaining active filters */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		/* Combine the checks for whether a filter needs to be changed
		 * and then determine the new VLAN inside the if block, in
		 * order to avoid duplicating code for adding the new filter
		 * then deleting the old filter.
		 */
		if ((pvid && f->vlan != pvid) ||
		    (vlan_filters && f->vlan == I40E_VLAN_ANY) ||
		    (!vlan_filters && f->vlan == 0)) {
			/* Determine the new vlan we will be adding */
			if (pvid)
				new_vlan = pvid;
			else if (vlan_filters)
				new_vlan = 0;
			else
				new_vlan = I40E_VLAN_ANY;

			/* Create the new filter */
			add_head = i40e_add_filter(vsi, f->macaddr, new_vlan);
			if (!add_head)
				return -ENOMEM;

			/* Create a temporary i40e_new_mac_filter */
			new = kzalloc(sizeof(*new), GFP_ATOMIC);
			if (!new)
				return -ENOMEM;

			new->f = add_head;
			new->state = add_head->state;

			/* Add the new filter to the tmp list */
			hlist_add_head(&new->hlist, tmp_add_list);

			/* Put the original filter into the delete list */
			f->state = I40E_FILTER_REMOVE;
			hash_del(&f->hlist);
			hlist_add_head(&f->hlist, tmp_del_list);
		}
	}

	vsi->has_vlan_filter = !!vlan_filters;

	return 0;
}
/**
 * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
 * @vsi: the PF Main VSI - inappropriate for any other VSI
 * @macaddr: the MAC address
 *
 * Remove whatever filter the firmware set up so the driver can manage
 * its own filtering intelligently.
 **/
static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
	struct i40e_aqc_remove_macvlan_element_data element;
	struct i40e_pf *pf = vsi->back;

	/* Only appropriate for the PF main VSI */
	if (vsi->type != I40E_VSI_MAIN)
		return;

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* Ignore error returns, some firmware does it this way... */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);

	memset(&element, 0, sizeof(element));
	ether_addr_copy(element.mac_addr, macaddr);
	element.vlan_tag = 0;
	/* ...and some firmware does it this way. */
	element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
			I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
	i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
 * i40e_add_filter - Add a mac/vlan filter to the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * Returns ptr to the filter object or NULL when no memory available.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 **/
struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
					const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;
	u64 key;

	if (!vsi || !macaddr)
		return NULL;

	f = i40e_find_filter(vsi, macaddr, vlan);
	if (!f) {
		f = kzalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return NULL;

		/* Update the boolean indicating if we need to function in
		 * VLAN mode.
		 */
		if (vlan >= 0)
			vsi->has_vlan_filter = true;

		ether_addr_copy(f->macaddr, macaddr);
		f->vlan = vlan;
		f->state = I40E_FILTER_NEW;
		INIT_HLIST_NODE(&f->hlist);

		key = i40e_addr_to_hkey(macaddr);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);

		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}

	/* If we're asked to add a filter that has been marked for removal, it
	 * is safe to simply restore it to active state. __i40e_del_filter
	 * will have simply deleted any filters which were previously marked
	 * NEW or FAILED, so if it is currently marked REMOVE it must have
	 * previously been ACTIVE. Since we haven't yet run the sync filters
	 * task, just restore this filter to the ACTIVE state so that the
	 * sync task leaves it in place
	 */
	if (f->state == I40E_FILTER_REMOVE)
		f->state = I40E_FILTER_ACTIVE;

	return f;
}
/**
 * __i40e_del_filter - Remove a specific filter from the VSI
 * @vsi: VSI to remove from
 * @f: the filter to remove from the list
 *
 * This function should be called instead of i40e_del_filter only if you know
 * the exact filter you will remove already, such as via i40e_find_filter or
 * i40e_find_mac.
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void __i40e_del_filter(struct i40e_vsi *vsi, struct i40e_mac_filter *f)
{
	if (!f)
		return;

	/* If the filter was never added to firmware then we can just delete it
	 * directly and we don't want to set the status to remove or else an
	 * admin queue command will unnecessarily fire.
	 */
	if ((f->state == I40E_FILTER_FAILED) ||
	    (f->state == I40E_FILTER_NEW)) {
		hash_del(&f->hlist);
		kfree(f);
	} else {
		f->state = I40E_FILTER_REMOVE;
	}

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
}
/**
 * i40e_del_filter - Remove a MAC/VLAN filter from the VSI
 * @vsi: the VSI to be searched
 * @macaddr: the MAC address
 * @vlan: the VLAN
 *
 * NOTE: This function is expected to be called with mac_filter_hash_lock
 * being held.
 * ANOTHER NOTE: This function MUST be called from within the context of
 * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
 * instead of list_for_each_entry().
 **/
void i40e_del_filter(struct i40e_vsi *vsi, const u8 *macaddr, s16 vlan)
{
	struct i40e_mac_filter *f;

	if (!vsi || !macaddr)
		return;

	f = i40e_find_filter(vsi, macaddr, vlan);
	__i40e_del_filter(vsi, f);
}
/**
 * i40e_add_mac_filter - Add a MAC filter for all active VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 *
 * If we're not in VLAN mode, just add the filter to I40E_VLAN_ANY. Otherwise,
 * go through all the macvlan filters and add a macvlan filter for each
 * unique vlan that already exists. If a PVID has been assigned, instead only
 * add the macaddr to that VLAN.
 *
 * Returns last filter added on success, else NULL
 **/
struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
					    const u8 *macaddr)
{
	struct i40e_mac_filter *f, *add = NULL;
	struct hlist_node *h;
	int bkt;

	if (vsi->info.pvid)
		return i40e_add_filter(vsi, macaddr,
				       le16_to_cpu(vsi->info.pvid));

	if (!i40e_is_vsi_in_vlan(vsi))
		return i40e_add_filter(vsi, macaddr, I40E_VLAN_ANY);

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add = i40e_add_filter(vsi, macaddr, f->vlan);
		if (!add)
			return NULL;
	}

	return add;
}
/**
 * i40e_del_mac_filter - Remove a MAC filter from all VLANs
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be removed
 *
 * Removes a given MAC address from a VSI regardless of what VLAN it has been
 * associated with.
 *
 * Returns 0 for success, or error
 **/
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	bool found = false;
	int bkt;

	WARN(!spin_is_locked(&vsi->mac_filter_hash_lock),
	     "Missing mac_filter_hash_lock\n");
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (ether_addr_equal(macaddr, f->macaddr)) {
			__i40e_del_filter(vsi, f);
			found = true;
		}
	}

	if (found)
		return 0;
	else
		return -ENOENT;
}
/**
 * i40e_set_mac - NDO callback to set mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_set_mac(struct net_device *netdev, void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
		netdev_info(netdev, "already using mac address %pM\n",
			    addr->sa_data);
		return 0;
	}

	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return -EADDRNOTAVAIL;

	if (ether_addr_equal(hw->mac.addr, addr->sa_data))
		netdev_info(netdev, "returning to hw mac address %pM\n",
			    hw->mac.addr);
	else
		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

	/* Copy the address first, so that we avoid a possible race with
	 * .set_rx_mode(). If we copy after changing the address in the filter
	 * list, we might open ourselves to a narrow race window where
	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
	 * from being delivered.
	 */
	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_del_mac_filter(vsi, netdev->dev_addr);
	i40e_add_mac_filter(vsi, addr->sa_data);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (vsi->type == I40E_VSI_MAIN) {
		i40e_status ret;

		ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_WOL,
						addr->sa_data, NULL);
		if (ret)
			netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
				    i40e_stat_str(hw, ret),
				    i40e_aq_str(hw, hw->aq.asq_last_status));
	}

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(pf);
	return 0;
}
/**
 * i40e_config_rss_aq - Prepare for RSS using AQ commands
 * @vsi: vsi structure
 * @seed: RSS hash seed
 * @lut: pointer to lookup table
 * @lut_size: size of the lookup table
 **/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			      u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		struct i40e_aqc_get_set_rss_key_data *seed_dw =
			(struct i40e_aqc_get_set_rss_key_data *)seed;
		ret = i40e_aq_set_rss_key(hw, vsi->id, seed_dw);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS key, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot set RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			return ret;
		}
	}
	return ret;
}
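/* The seed buffer is expected to be I40E_HKEY_ARRAY_SIZE bytes, which is
 * why it can simply be cast to struct i40e_aqc_get_set_rss_key_data for
 * the admin queue command; see i40e_vsi_config_rss() below for a caller
 * that sizes the buffer that way.
 */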
/**
 * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
 * @vsi: VSI structure
 **/
static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	int ret;

	if (!(pf->hw_features & I40E_HW_RSS_AQ_CAPABLE))
		return 0;
	if (!vsi->rss_size)
		vsi->rss_size = min_t(int, pf->alloc_rss_size,
				      vsi->num_queue_pairs);
	if (!vsi->rss_size)
		return -EINVAL;
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use the user configured hash keys and lookup table if there is one,
	 * otherwise use default
	 */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);
	return ret;
}
/**
 * i40e_vsi_setup_queue_map_mqprio - Prepares mqprio based tc_config
 * @vsi: the VSI being configured,
 * @ctxt: VSI context structure
 * @enabled_tc: number of traffic classes to enable
 *
 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
 **/
static int i40e_vsi_setup_queue_map_mqprio(struct i40e_vsi *vsi,
					   struct i40e_vsi_context *ctxt,
					   u8 enabled_tc)
{
	u16 qcount = 0, max_qcount, qmap, sections = 0;
	int i, override_q, pow, num_qps, ret;
	u8 netdev_tc = 0, offset = 0;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;
	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
	vsi->tc_config.numtc = vsi->mqprio_qopt.qopt.num_tc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
	num_qps = vsi->mqprio_qopt.qopt.count[0];

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(num_qps);
	if (!is_power_of_2(num_qps))
		pow++;
	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue offset/count for all TCs for given VSI */
	max_qcount = vsi->mqprio_qopt.qopt.count[0];
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			offset = vsi->mqprio_qopt.qopt.offset[i];
			qcount = vsi->mqprio_qopt.qopt.count[i];
			if (qcount > max_qcount)
				max_qcount = qcount;
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;
			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;
		}
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset + qcount;

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);

	/* Reconfigure RSS for main VSI with max queue count */
	vsi->rss_size = max_qcount;
	ret = i40e_vsi_config_rss(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to reconfig rss for num_queues (%u)\n",
			 max_qcount);
		return ret;
	}
	vsi->reconfig_rss = true;
	dev_dbg(&vsi->back->pdev->dev,
		"Reconfigured rss with num_queues (%u)\n", max_qcount);

	/* Find queue count available for channel VSIs and starting offset
	 * for channel VSIs
	 */
	override_q = vsi->mqprio_qopt.qopt.count[0];
	if (override_q && override_q < vsi->num_queue_pairs) {
		vsi->cnt_q_avail = vsi->num_queue_pairs - override_q;
		vsi->next_base_queue = override_q;
	}
	return 0;
}
/**
 * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
 * @vsi: the VSI being setup
 * @ctxt: VSI context structure
 * @enabled_tc: Enabled TCs bitmap
 * @is_add: True if called before Add VSI
 *
 * Setup VSI queue mapping for enabled traffic classes.
 **/
static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
				     struct i40e_vsi_context *ctxt,
				     u8 enabled_tc,
				     bool is_add)
{
	struct i40e_pf *pf = vsi->back;
	u16 sections = 0;
	u8 netdev_tc = 0;
	u16 numtc = 1;
	u16 qcount;
	u8 offset;
	u16 qmap;
	int i;
	u16 num_tc_qps = 0;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	offset = 0;

	/* Number of queues per enabled TC */
	num_tc_qps = vsi->alloc_queue_pairs;
	if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Find numtc from enabled TC bitmap */
		for (i = 0, numtc = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
			if (enabled_tc & BIT(i)) /* TC is enabled */
				numtc++;
		}
		if (!numtc) {
			dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
			numtc = 1;
		}
		num_tc_qps = num_tc_qps / numtc;
		num_tc_qps = min_t(int, num_tc_qps,
				   i40e_pf_get_max_q_per_tc(pf));
	}

	vsi->tc_config.numtc = numtc;
	vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;

	/* Do not allow use more TC queue pairs than MSI-X vectors exist */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_tc_qps = min_t(int, num_tc_qps, pf->num_lan_msix);

	/* Setup queue offset/count for all TCs for given VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* See if the given TC is enabled for the given VSI */
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			/* TC is enabled */
			int pow, num_qps;

			switch (vsi->type) {
			case I40E_VSI_MAIN:
				if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED |
				    I40E_FLAG_FD_ATR_ENABLED)) ||
				    vsi->tc_config.enabled_tc != 1) {
					qcount = min_t(int, pf->alloc_rss_size,
						       num_tc_qps);
					break;
				}
				/* fall through */
			case I40E_VSI_FDIR:
			case I40E_VSI_SRIOV:
			case I40E_VSI_VMDQ2:
			default:
				qcount = num_tc_qps;
				break;
			}
			vsi->tc_config.tc_info[i].qoffset = offset;
			vsi->tc_config.tc_info[i].qcount = qcount;

			/* find the next higher power-of-2 of num queue pairs */
			num_qps = qcount;
			pow = 0;
			while (num_qps && (BIT_ULL(pow) < qcount)) {
				pow++;
				num_qps >>= 1;
			}

			vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
			qmap =
			    (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
			    (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

			offset += qcount;
		} else {
			/* TC is not enabled so set the offset to
			 * default queue and allocate one queue
			 * for the given TC.
			 */
			vsi->tc_config.tc_info[i].qoffset = 0;
			vsi->tc_config.tc_info[i].qcount = 1;
			vsi->tc_config.tc_info[i].netdev_tc = 0;

			qmap = 0;
		}
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* Set actual Tx/Rx queue pairs */
	vsi->num_queue_pairs = offset;
	if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
		if (vsi->req_queue_pairs > 0)
			vsi->num_queue_pairs = vsi->req_queue_pairs;
		else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_queue_pairs = pf->num_lan_msix;
	}

	/* Scheduler section valid can only be set for ADD VSI */
	if (is_add) {
		sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

		ctxt->info.up_enable_bits = enabled_tc;
	}
	if (vsi->type == I40E_VSI_SRIOV) {
		ctxt->info.mapping_flags |=
				     cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
		for (i = 0; i < vsi->num_queue_pairs; i++)
			ctxt->info.queue_mapping[i] =
					       cpu_to_le16(vsi->base_queue + i);
	} else {
		ctxt->info.mapping_flags |=
					cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
		ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
	}
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
/**
 * i40e_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_sync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (i40e_add_mac_filter(vsi, addr))
		return 0;
	else
		return -ENOMEM;
}
/**
 * i40e_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 **/
static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* Under some circumstances, we might receive a request to delete
	 * our own device address from our uc list. Because we store the
	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
	 * such requests and not delete our device address from this list.
	 */
	if (ether_addr_equal(addr, netdev->dev_addr))
		return 0;

	i40e_del_mac_filter(vsi, addr);

	return 0;
}
/**
 * i40e_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void i40e_set_rx_mode(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	__dev_uc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);
	__dev_mc_sync(netdev, i40e_addr_sync, i40e_addr_unsync);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);
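
	/* The uc/mc sync above only staged changes in the MAC filter hash;
	 * the actual AdminQ programming happens later from the service task
	 * in i40e_sync_vsi_filters().
	 */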
	/* check for other flag changes */
	if (vsi->current_netdev_flags != vsi->netdev->flags) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, vsi->back->state);
	}
}
/**
 * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to VSI struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for deletion.
 **/
static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;

	hlist_for_each_entry_safe(f, h, from, hlist) {
		u64 key = i40e_addr_to_hkey(f->macaddr);

		/* Move the element back into MAC filter list */
		hlist_del(&f->hlist);
		hash_add(vsi->mac_filter_hash, &f->hlist, key);
	}
}
/**
 * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
 * @vsi: Pointer to vsi struct
 * @from: Pointer to list which contains MAC filter entries - changes to
 *        those entries need to be undone.
 *
 * MAC filter entries from this list were slated for addition.
 **/
static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi,
					 struct hlist_head *from)
{
	struct i40e_new_mac_filter *new;
	struct hlist_node *h;

	hlist_for_each_entry_safe(new, h, from, hlist) {
		/* We can simply free the wrapper structure */
		hlist_del(&new->hlist);
		kfree(new);
	}
}
/**
 * i40e_next_filter - Get the next non-broadcast filter from a list
 * @next: pointer to filter in list
 *
 * Returns the next non-broadcast filter in the list. Required so that we
 * ignore broadcast filters within the list, since these are not handled via
 * the normal firmware update path.
 **/
static
struct i40e_new_mac_filter *i40e_next_filter(struct i40e_new_mac_filter *next)
{
	hlist_for_each_entry_continue(next, hlist) {
		if (!is_broadcast_ether_addr(next->f->macaddr))
			return next;
	}

	return NULL;
}
/**
 * i40e_update_filter_state - Update filter state based on return data
 * from firmware
 * @count: Number of filters added
 * @add_list: return data from fw
 * @add_head: pointer to first filter in current batch
 *
 * MAC filter entries from list were slated to be added to device. Returns
 * number of successful filters. Note that 0 does NOT mean success!
 **/
static int
i40e_update_filter_state(int count,
			 struct i40e_aqc_add_macvlan_element_data *add_list,
			 struct i40e_new_mac_filter *add_head)
{
	int retval = 0;
	int i;

	for (i = 0; i < count; i++) {
		/* Always check status of each filter. We don't need to check
		 * the firmware return status because we pre-set the filter
		 * status to I40E_AQC_MM_ERR_NO_RES when sending the filter
		 * request to the adminq. Thus, if it no longer matches then
		 * we know the filter is active.
		 */
		if (add_list[i].match_method == I40E_AQC_MM_ERR_NO_RES) {
			add_head->state = I40E_FILTER_FAILED;
		} else {
			add_head->state = I40E_FILTER_ACTIVE;
			retval++;
		}

		add_head = i40e_next_filter(add_head);
		if (!add_head)
			break;
	}

	return retval;
}
/**
 * i40e_aqc_del_filters - Request firmware to delete a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @num_del: the number of filters to delete
 * @retval: Set to -EIO on failure to delete
 *
 * Send a request to firmware via AdminQ to delete a set of filters. Uses
 * *retval instead of a return value so that success does not force ret_val to
 * be set to 0. This ensures that a sequence of calls to this function
 * preserve the previous value of *retval on successful delete.
 **/
static
void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_remove_macvlan_element_data *list,
			  int num_del, int *retval)
{
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;
	int aq_err;

	aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, list, num_del, NULL);
	aq_err = hw->aq.asq_last_status;

	/* Explicitly ignore and do not report when firmware returns ENOENT */
	if (aq_ret && !(aq_err == I40E_AQ_RC_ENOENT)) {
		*retval = -EIO;
		dev_info(&vsi->back->pdev->dev,
			 "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
			 vsi_name, i40e_stat_str(hw, aq_ret),
			 i40e_aq_str(hw, aq_err));
	}
}
/**
 * i40e_aqc_add_filters - Request firmware to add a set of filters
 * @vsi: ptr to the VSI
 * @vsi_name: name to display in messages
 * @list: the list of filters to send to firmware
 * @add_head: Position in the add hlist
 * @num_add: the number of filters to add
 *
 * Send a request to firmware via AdminQ to add a chunk of filters. Will set
 * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
 * space for more filters.
 **/
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_aqc_add_macvlan_element_data *list,
			  struct i40e_new_mac_filter *add_head,
			  int num_add)
{
	struct i40e_hw *hw = &vsi->back->hw;
	int aq_err, fcnt;

	i40e_aq_add_macvlan(hw, vsi->seid, list, num_add, NULL);
	aq_err = hw->aq.asq_last_status;
	fcnt = i40e_update_filter_state(num_add, list, add_head);

	if (fcnt != num_add) {
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s adding RX filters on %s, promiscuous mode forced on\n",
			 i40e_aq_str(hw, aq_err),
			 vsi_name);
	}
}
/**
 * i40e_aqc_broadcast_filter - Set promiscuous broadcast flags
 * @vsi: pointer to the VSI
 * @vsi_name: the VSI name
 * @f: filter data
 *
 * This function sets or clears the promiscuous broadcast flags for VLAN
 * filters in order to properly receive broadcast frames. Assumes that only
 * broadcast filters are passed.
 *
 * Returns status indicating success or failure.
 **/
static i40e_status
i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
			  struct i40e_mac_filter *f)
{
	bool enable = f->state == I40E_FILTER_NEW;
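	/* A filter arriving here in the NEW state turns broadcast
	 * promiscuous on; any other state (i.e. REMOVE) turns it off.
	 */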
	struct i40e_hw *hw = &vsi->back->hw;
	i40e_status aq_ret;

	if (f->vlan == I40E_VLAN_ANY) {
		aq_ret = i40e_aq_set_vsi_broadcast(hw,
						   vsi->seid,
						   enable,
						   NULL);
	} else {
		aq_ret = i40e_aq_set_vsi_bc_promisc_on_vlan(hw,
							    vsi->seid,
							    enable,
							    f->vlan,
							    NULL);
	}

	if (aq_ret) {
		set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		dev_warn(&vsi->back->pdev->dev,
			 "Error %s, forcing overflow promiscuous on %s\n",
			 i40e_aq_str(hw, hw->aq.asq_last_status),
			 vsi_name);
	}

	return aq_ret;
}
/**
 * i40e_set_promiscuous - set promiscuous mode
 * @pf: board private structure
 * @promisc: promisc on or off
 *
 * There are different ways of setting promiscuous mode on a PF depending on
 * what state/environment we're in. This identifies and sets it appropriately.
 * Returns 0 on success.
 **/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	i40e_status aq_ret;

	if (vsi->type == I40E_VSI_MAIN &&
	    pf->lan_veb != I40E_NO_VEB &&
	    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
		/* set defport ON for Main VSI instead of true promisc
		 * this way we will get all unicast/multicast and VLAN
		 * promisc behavior but will not get VF or VMDq traffic
		 * replicated on the Main VSI.
		 */
		if (promisc)
			aq_ret = i40e_aq_set_default_vsi(hw,
							 vsi->seid,
							 NULL);
		else
			aq_ret = i40e_aq_clear_default_vsi(hw,
							   vsi->seid,
							   NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "Set default VSI failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	} else {
		aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
						  hw,
						  vsi->seid,
						  promisc, NULL,
						  true);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set unicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
						  hw,
						  vsi->seid,
						  promisc, NULL);
		if (aq_ret) {
			dev_info(&pf->pdev->dev,
				 "set multicast promisc failed, err %s, aq_err %s\n",
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if (!aq_ret)
		pf->cur_promisc = promisc;

	return aq_ret;
}
/**
 * i40e_sync_vsi_filters - Update the VSI filter list to the HW
 * @vsi: ptr to the VSI
 *
 * Push any outstanding VSI filter changes through the AdminQ.
 *
 * Returns 0 or error value
 **/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
	struct hlist_head tmp_add_list, tmp_del_list;
	struct i40e_mac_filter *f;
	struct i40e_new_mac_filter *new, *add_head = NULL;
	struct i40e_hw *hw = &vsi->back->hw;
	bool old_overflow, new_overflow;
	unsigned int failed_filters = 0;
	unsigned int vlan_filters = 0;
	char vsi_name[16] = "PF";
	int filter_list_len = 0;
	i40e_status aq_ret = 0;
	u32 changed_flags = 0;
	struct hlist_node *h;
	struct i40e_pf *pf;
	int num_add = 0;
	int num_del = 0;
	int retval = 0;
	u16 cmd_flags;
	int list_size;
	int bkt;

	/* empty array typed pointers, kcalloc later */
	struct i40e_aqc_add_macvlan_element_data *add_list;
	struct i40e_aqc_remove_macvlan_element_data *del_list;
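
	/* Only one sync may run at a time; a concurrent caller sleeps here
	 * until the __I40E_VSI_SYNCING_FILTERS bit is released.
	 */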
	while (test_and_set_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state))
		usleep_range(1000, 2000);
	pf = vsi->back;

	old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	if (vsi->netdev) {
		changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
		vsi->current_netdev_flags = vsi->netdev->flags;
	}

	INIT_HLIST_HEAD(&tmp_add_list);
	INIT_HLIST_HEAD(&tmp_del_list);

	if (vsi->type == I40E_VSI_SRIOV)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
	else if (vsi->type != I40E_VSI_MAIN)
		snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);

	if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
		vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		/* Create a list of filters to delete. */
		hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
			if (f->state == I40E_FILTER_REMOVE) {
				/* Move the element into temporary del_list */
				hash_del(&f->hlist);
				hlist_add_head(&f->hlist, &tmp_del_list);

				/* Avoid counting removed filters */
				continue;
			}
			if (f->state == I40E_FILTER_NEW) {
				/* Create a temporary i40e_new_mac_filter */
				new = kzalloc(sizeof(*new), GFP_ATOMIC);
				if (!new)
					goto err_no_memory_locked;

				/* Store pointer to the real filter */
				new->f = f;
				new->state = f->state;

				/* Add it to the hash list */
				hlist_add_head(&new->hlist, &tmp_add_list);
			}

			/* Count the number of active (current and new) VLAN
			 * filters we have now. Does not count filters which
			 * are marked for deletion.
			 */
			if (f->vlan > 0)
				vlan_filters++;
		}

		retval = i40e_correct_mac_vlan_filters(vsi,
						       &tmp_add_list,
						       &tmp_del_list,
						       vlan_filters);
		if (retval)
			goto err_no_memory_locked;

		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Now process 'del_list' outside the lock */
	if (!hlist_empty(&tmp_del_list)) {
		filter_list_len = hw->aq.asq_buf_size /
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		list_size = filter_list_len *
			    sizeof(struct i40e_aqc_remove_macvlan_element_data);
		del_list = kzalloc(list_size, GFP_ATOMIC);
		if (!del_list)
			goto err_no_memory;

		hlist_for_each_entry_safe(f, h, &tmp_del_list, hlist) {
			cmd_flags = 0;

			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag and release filter list.
			 */
			if (is_broadcast_ether_addr(f->macaddr)) {
				i40e_aqc_broadcast_filter(vsi, vsi_name, f);

				hlist_del(&f->hlist);
				kfree(f);
				continue;
			}

			/* add to delete list */
			ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
			if (f->vlan == I40E_VLAN_ANY) {
				del_list[num_del].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			} else {
				del_list[num_del].vlan_tag =
					cpu_to_le16((u16)(f->vlan));
			}

			cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
			del_list[num_del].flags = cmd_flags;
			num_del++;

			/* flush a full buffer */
			if (num_del == filter_list_len) {
				i40e_aqc_del_filters(vsi, vsi_name, del_list,
						     num_del, &retval);
				memset(del_list, 0, list_size);
				num_del = 0;
			}
			/* Release memory for MAC filter entries which were
			 * synced up with HW.
			 */
			hlist_del(&f->hlist);
			kfree(f);
		}

		if (num_del) {
			i40e_aqc_del_filters(vsi, vsi_name, del_list,
					     num_del, &retval);
		}

		kfree(del_list);
		del_list = NULL;
	}

	if (!hlist_empty(&tmp_add_list)) {
		/* Do all the adds now. */
		filter_list_len = hw->aq.asq_buf_size /
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		list_size = filter_list_len *
			       sizeof(struct i40e_aqc_add_macvlan_element_data);
		add_list = kzalloc(list_size, GFP_ATOMIC);
		if (!add_list)
			goto err_no_memory;

		num_add = 0;
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* handle broadcast filters by updating the broadcast
			 * promiscuous flag instead of adding a MAC filter.
			 */
			if (is_broadcast_ether_addr(new->f->macaddr)) {
				if (i40e_aqc_broadcast_filter(vsi, vsi_name,
							      new->f))
					new->state = I40E_FILTER_FAILED;
				else
					new->state = I40E_FILTER_ACTIVE;
				continue;
			}

			/* add to add array */
			if (num_add == 0)
				add_head = new;
			cmd_flags = 0;
			ether_addr_copy(add_list[num_add].mac_addr,
					new->f->macaddr);
			if (new->f->vlan == I40E_VLAN_ANY) {
				add_list[num_add].vlan_tag = 0;
				cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
			} else {
				add_list[num_add].vlan_tag =
					cpu_to_le16((u16)(new->f->vlan));
			}
			add_list[num_add].queue_number = 0;
			/* set invalid match method for later detection */
			add_list[num_add].match_method = I40E_AQC_MM_ERR_NO_RES;
			cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
			add_list[num_add].flags = cpu_to_le16(cmd_flags);
			num_add++;

			/* flush a full buffer */
			if (num_add == filter_list_len) {
				i40e_aqc_add_filters(vsi, vsi_name, add_list,
						     add_head, num_add);
				memset(add_list, 0, list_size);
				num_add = 0;
			}
		}
		if (num_add) {
			i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
					     num_add);
		}
		/* Now move all of the filters from the temp add list back to
		 * the VSI's list.
		 */
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
			/* Only update the state if we're still NEW */
			if (new->f->state == I40E_FILTER_NEW)
				new->f->state = new->state;
			hlist_del(&new->hlist);
			kfree(new);
		}
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
		kfree(add_list);
		add_list = NULL;
	}

	/* Determine the number of active and failed filters. */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	vsi->active_filters = 0;
	hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) {
		if (f->state == I40E_FILTER_ACTIVE)
			vsi->active_filters++;
		else if (f->state == I40E_FILTER_FAILED)
			failed_filters++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* Check if we are able to exit overflow promiscuous mode. We can
	 * safely exit if we didn't just enter, we no longer have any failed
	 * filters, and we have reduced filters below the threshold value.
	 */
	if (old_overflow && !failed_filters &&
	    vsi->active_filters < vsi->promisc_threshold) {
		dev_info(&pf->pdev->dev,
			 "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
			 vsi_name);
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		vsi->promisc_threshold = 0;
	}

	/* if the VF is not trusted do not do promisc */
	if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
		clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
		goto out;
	}

	new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);

	/* If we are entering overflow promiscuous, we need to calculate a new
	 * threshold for when we are safe to exit
	 */
	if (!old_overflow && new_overflow)
		vsi->promisc_threshold = (vsi->active_filters * 3) / 4;

	/* check for changes in promiscuous modes */
	if (changed_flags & IFF_ALLMULTI) {
		bool cur_multipromisc;

		cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
		aq_ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
							       vsi->seid,
							       cur_multipromisc,
							       NULL);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "set multi promisc failed on %s, err %s aq_err %s\n",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}

	if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
		bool cur_promisc;

		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
			       new_overflow);
		aq_ret = i40e_set_promiscuous(pf, cur_promisc);
		if (aq_ret) {
			retval = i40e_aq_rc_to_posix(aq_ret,
						     hw->aq.asq_last_status);
			dev_info(&pf->pdev->dev,
				 "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
				 cur_promisc ? "on" : "off",
				 vsi_name,
				 i40e_stat_str(hw, aq_ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
		}
	}
out:
	/* if something went wrong then set the changed flag so we try again */
	if (retval)
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;

	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return retval;

err_no_memory:
	/* Restore elements on the temporary add and delete lists */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
err_no_memory_locked:
	i40e_undo_del_filter_entries(vsi, &tmp_del_list);
	i40e_undo_add_filter_entries(vsi, &tmp_add_list);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
	clear_bit(__I40E_VSI_SYNCING_FILTERS, vsi->state);
	return -ENOMEM;
}
/**
 * i40e_sync_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_filters_subtask(struct i40e_pf *pf)
{
	int v;

	if (!pf)
		return;
	if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
		return;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v] &&
		    (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
			int ret = i40e_sync_vsi_filters(pf->vsi[v]);

			if (ret) {
				/* come back and try again later */
				set_bit(__I40E_MACVLAN_SYNC_PENDING,
					pf->state);
				break;
			}
		}
	}
}
/**
 * i40e_max_xdp_frame_size - returns the maximum allowed frame size for XDP
 * @vsi: the vsi
 **/
static int i40e_max_xdp_frame_size(struct i40e_vsi *vsi)
{
	if (PAGE_SIZE >= 8192 || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		return I40E_RXBUFFER_2048;
	else
		return I40E_RXBUFFER_3072;
}
/**
 * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	if (i40e_enabled_xdp_vsi(vsi)) {
		int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

		if (frame_size > i40e_max_xdp_frame_size(vsi))
			return -EINVAL;
	}

	netdev_info(netdev, "changing MTU from %d to %d\n",
		    netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	if (netif_running(netdev))
		i40e_vsi_reinit_locked(vsi);
	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
	set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
	return 0;
}
/**
 * i40e_ioctl - Access the hwtstamp interface
 * @netdev: network interface device structure
 * @ifr: interface request data
 * @cmd: ioctl command
 **/
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	switch (cmd) {
	case SIOCGHWTSTAMP:
		return i40e_ptp_get_ts_config(pf, ifr);
	case SIOCSHWTSTAMP:
		return i40e_ptp_set_ts_config(pf, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
		return;  /* already enabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}
/**
 * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
 * @vsi: the vsi being adjusted
 **/
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	if ((vsi->info.valid_sections &
	     cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
	    ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
	     I40E_AQ_VSI_PVLAN_EMOD_MASK))
		return;  /* already disabled */

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
				    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "update vlan stripping failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
	}
}
/**
 * i40e_add_vlan_all_mac - Add a MAC/VLAN filter for each existing MAC address
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only, -1 = any)
 *
 * This is a helper function for adding a new MAC/VLAN filter with the
 * specified VLAN for each existing MAC address already in the hash table.
 * This function does *not* perform any accounting to update filters based on
 * VLAN mode.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
int i40e_add_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->state == I40E_FILTER_REMOVE)
			continue;
		add_f = i40e_add_filter(vsi, f->macaddr, vid);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			return -ENOMEM;
		}
	}

	return 0;
}
/**
 * i40e_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be added
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, u16 vid)
{
	int err;

	if (vsi->info.pvid)
		return -EINVAL;

	/* The network stack will attempt to add VID=0, with the intention to
	 * receive priority tagged packets with a VLAN of 0. Our HW receives
	 * these packets by default when configured to receive untagged
	 * packets, so we don't need to add a filter for this case.
	 * Additionally, HW interprets adding a VID=0 filter as meaning to
	 * receive *only* tagged traffic and stops receiving untagged traffic.
	 * Thus, we do not want to actually add a filter for VID=0
	 */
	if (!vid)
		return 0;

	/* Locked once because all functions invoked below iterate the list */
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	err = i40e_add_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);
	if (err)
		return err;

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
/**
 * i40e_rm_vlan_all_mac - Remove MAC/VLAN pair for all MAC with the given VLAN
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only, -1 = any)
 *
 * This function should be used to remove all VLAN filters which match the
 * given VID. It does not schedule the service event and does not take the
 * mac_filter_hash_lock so it may be combined with other operations under
 * a single invocation of the mac_filter_hash_lock.
 *
 * NOTE: this function expects to be called while under the
 * mac_filter_hash_lock
 **/
void i40e_rm_vlan_all_mac(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		if (f->vlan == vid)
			__i40e_del_filter(vsi, f);
	}
}
/**
 * i40e_vsi_kill_vlan - Remove VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN id to be removed
 **/
void i40e_vsi_kill_vlan(struct i40e_vsi *vsi, u16 vid)
{
	if (!vid || vsi->info.pvid)
		return;

	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_rm_vlan_all_mac(vsi, vid);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
}
/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 **/
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	ret = i40e_vsi_add_vlan(vsi, vid);
	if (!ret)
		set_bit(vid, vsi->active_vlans);

	return ret;
}
/**
 * i40e_vlan_rx_add_vid_up - Add a vlan id filter to HW offload in UP path
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be added
 **/
static void i40e_vlan_rx_add_vid_up(struct net_device *netdev,
				    __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	if (vid >= VLAN_N_VID)
		return;
	set_bit(vid, vsi->active_vlans);
}
/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @proto: unused protocol value
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 **/
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	clear_bit(vid, vsi->active_vlans);

	return 0;
}
/**
 * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
 * @vsi: the vsi being brought back up
 **/
static void i40e_restore_vlan(struct i40e_vsi *vsi)
{
	u16 vid;

	if (!vsi->netdev)
		return;

	if (vsi->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
		i40e_vlan_rx_add_vid_up(vsi->netdev, htons(ETH_P_8021Q),
					vid);
}
/**
 * i40e_vsi_add_pvid - Add pvid for the VSI
 * @vsi: the vsi being adjusted
 * @vid: the vlan id to set as a PVID
 **/
int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
{
	struct i40e_vsi_context ctxt;
	i40e_status ret;

	vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
	vsi->info.pvid = cpu_to_le16(vid);
	vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
				    I40E_AQ_VSI_PVLAN_INSERT_PVID |
				    I40E_AQ_VSI_PVLAN_EMOD_STR;

	ctxt.seid = vsi->seid;
	ctxt.info = vsi->info;
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "add pvid failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		return -ENOENT;
	}

	return 0;
}
/**
 * i40e_vsi_remove_pvid - Remove the pvid from the VSI
 * @vsi: the vsi being adjusted
 *
 * Just use the vlan_rx_register() service to put it back to normal
 **/
void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
{
	i40e_vlan_stripping_disable(vsi);

	vsi->info.pvid = 0;
}
/**
 * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_tx_descriptors(vsi->xdp_rings[i]);

	return err;
}
/**
 * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free VSI's transmit software resources
 **/
static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
				i40e_free_tx_resources(vsi->tx_rings[i]);
	}

	if (vsi->xdp_rings) {
		for (i = 0; i < vsi->num_queue_pairs; i++)
			if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc)
				i40e_free_tx_resources(vsi->xdp_rings[i]);
	}
}
/**
 * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
 * @vsi: ptr to the VSI
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not). It is the
 * callers duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
{
	int i, err = 0;

	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
	return err;
}
/**
 * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
 * @vsi: ptr to the VSI
 *
 * Free all receive software resources
 **/
static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	for (i = 0; i < vsi->num_queue_pairs; i++)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			i40e_free_rx_resources(vsi->rx_rings[i]);
}
/**
 * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
 * @ring: The Tx ring to configure
 *
 * This enables/disables XPS for a given Tx descriptor ring
 * based on the TCs enabled for the VSI that ring belongs to.
 **/
static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
{
	int cpu;

	if (!ring->q_vector || !ring->netdev || ring->ch)
		return;

	/* We only initialize XPS once, so as not to overwrite user settings */
	if (test_and_set_bit(__I40E_TX_XPS_INIT_DONE, ring->state))
		return;

	cpu = cpumask_local_spread(ring->q_vector->v_idx, -1);
	netif_set_xps_queue(ring->netdev, get_cpu_mask(cpu),
			    ring->queue_index);
}
/**
 * i40e_configure_tx_ring - Configure a transmit ring context and rest
 * @ring: The Tx ring to configure
 *
 * Configure the Tx descriptor ring in the HMC context.
 **/
static int i40e_configure_tx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_txq tx_ctx;
	i40e_status err = 0;
	u32 qtx_ctl = 0;

	if (ring_is_xdp(ring))
		ring->xsk_umem = i40e_xsk_umem(ring);

	/* some ATR related tx ring init */
	if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
		ring->atr_sample_rate = vsi->back->atr_sample_rate;
		ring->atr_count = 0;
	} else {
		ring->atr_sample_rate = 0;
	}

	/* configure XPS */
	i40e_config_xps_tx_ring(ring);

	/* clear the context structure first */
	memset(&tx_ctx, 0, sizeof(tx_ctx));

	tx_ctx.new_context = 1;
	tx_ctx.base = (ring->dma / 128);
	tx_ctx.qlen = ring->count;
	tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
					       I40E_FLAG_FD_ATR_ENABLED));
	tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
	/* FDIR VSI tx ring can still use RS bit and writebacks */
	if (vsi->type != I40E_VSI_FDIR)
		tx_ctx.head_wb_ena = 1;
	tx_ctx.head_wb_addr = ring->dma +
			      (ring->count * sizeof(struct i40e_tx_desc));

	/* As part of VSI creation/update, FW allocates certain
	 * Tx arbitration queue sets for each TC enabled for
	 * the VSI. The FW returns the handles to these queue
	 * sets as part of the response buffer to Add VSI,
	 * Update VSI, etc. AQ commands. It is expected that
	 * these queue set handles be associated with the Tx
	 * queues by the driver as part of the TX queue context
	 * initialization. This has to be done regardless of
	 * DCB as by default everything is mapped to TC0.
	 */
	if (ring->ch)
		tx_ctx.rdylist =
			le16_to_cpu(ring->ch->info.qs_handle[ring->dcb_tc]);
	else
		tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);

	tx_ctx.rdylist_act = 0;

	/* clear the context in the HMC */
	err = i40e_clear_lan_tx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* Now associate this queue with this PCI function */
	if (ring->ch) {
		if (ring->ch->type == I40E_VSI_VMDQ2)
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
		else
			return -EINVAL;

		qtx_ctl |= (ring->ch->vsi_number <<
			    I40E_QTX_CTL_VFVM_INDX_SHIFT) &
			    I40E_QTX_CTL_VFVM_INDX_MASK;
	} else {
		if (vsi->type == I40E_VSI_VMDQ2) {
			qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
			qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
				    I40E_QTX_CTL_VFVM_INDX_MASK;
		} else {
			qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
		}
	}

	qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK);
	wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
	i40e_flush(hw);

	/* cache tail off for easier writes later */
	ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);

	return 0;
}
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;
	bool ok;
	int ret;

	bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	if (ring->vsi->type == I40E_VSI_MAIN)
		xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

	ring->xsk_umem = i40e_xsk_umem(ring);
	if (ring->xsk_umem) {
		ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
				   XDP_PACKET_HEADROOM;
		/* For AF_XDP ZC, we disallow packets to span on
		 * multiple buffers, thus letting us skip that
		 * handling in the fast-path.
		 */
		chain_len = 1;
		ring->zca.free = i40e_zca_free;
		ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						 MEM_TYPE_ZERO_COPY,
						 &ring->zca);
		if (ret)
			return ret;
		dev_info(&vsi->back->pdev->dev,
			 "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
			 ring->queue_index);
	} else {
		ring->rx_buf_len = vsi->rx_buf_len;
		if (ring->vsi->type == I40E_VSI_MAIN) {
			ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
							 MEM_TYPE_PAGE_SHARED,
							 NULL);
			if (ret)
				return ret;
		}
	}

	rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
				    BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
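	/* dbuff (and base below) are expressed in 128-byte units, per the
	 * Rx queue context layout.
	 */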
	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* use 32 byte descriptors */
	rx_ctx.dsize = 1;

	/* descriptor type is always zero
	 * rx_ctx.dtype = 0;
	 */
	rx_ctx.hsplit_0 = 0;

	rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 1;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* configure Rx buffer alignment */
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX))
		clear_ring_build_skb_enabled(ring);
	else
		set_ring_build_skb_enabled(ring);

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	ok = ring->xsk_umem ?
	     i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) :
	     !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
	if (!ok) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
			 ring->xsk_umem ? "UMEM enabled " : "",
			 ring->queue_index, pf_q);
	}

	return 0;
}
/**
 * i40e_vsi_configure_tx - Configure the VSI for Tx
 * @vsi: VSI structure describing this set of rings and resources
 *
 * Configure the Tx VSI for operation.
 **/
static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->tx_rings[i]);

	if (!i40e_enabled_xdp_vsi(vsi))
		return err;

	for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
		err = i40e_configure_tx_ring(vsi->xdp_rings[i]);

	return err;
}
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;
	if (!vsi->netdev || (vsi->back->flags & I40E_FLAG_LEGACY_RX)) {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
#if (PAGE_SIZE < 8192)
	} else if (!I40E_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = I40E_RXBUFFER_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = I40E_MAX_RXBUFFER;
		vsi->rx_buf_len = (PAGE_SIZE < 8192) ? I40E_RXBUFFER_3072 :
						       I40E_RXBUFFER_2048;
	}

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
/**
 * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
 * @vsi: ptr to the VSI
 **/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = 0;
			tx_ring->dcb_tc = 0;
		}
		return;
	}

	for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
		if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
			continue;

		qoffset = vsi->tc_config.tc_info[n].qoffset;
		qcount = vsi->tc_config.tc_info[n].qcount;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			rx_ring = vsi->rx_rings[i];
			tx_ring = vsi->tx_rings[i];
			rx_ring->dcb_tc = n;
			tx_ring->dcb_tc = n;
		}
	}
}
/**
 * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
 * @vsi: ptr to the VSI
 **/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
	if (vsi->netdev)
		i40e_set_rx_mode(vsi->netdev);
}
/**
 * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
 * @vsi: Pointer to the targeted VSI
 *
 * This function replays the hlist on the hw where all the SB Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
{
	struct i40e_fdir_filter *filter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* Reset FDir counters as we're replaying all existing filters */
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	hlist_for_each_entry_safe(filter, node,
				  &pf->fdir_filter_list, fdir_node) {
		i40e_add_del_fdir(vsi, filter, true);
	}
}
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int err;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);
	err = i40e_vsi_configure_tx(vsi);
	if (!err)
		err = i40e_vsi_configure_rx(vsi);

	return err;
}
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	bool has_xdp = i40e_enabled_xdp_vsi(vsi);
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		q_vector->rx.next_update = jiffies + 1;
		q_vector->rx.target_itr =
			ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.target_itr);
		q_vector->rx.current_itr = q_vector->rx.target_itr;

		q_vector->tx.next_update = jiffies + 1;
		q_vector->tx.target_itr =
			ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.target_itr);
		q_vector->tx.current_itr = q_vector->tx.target_itr;

		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     i40e_intrl_usec_to_reg(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
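
		/* Each cause register below names the next queue in the
		 * chain: Rx points at its pair's Tx queue (the XDP Tx queue
		 * first, when XDP is enabled), Tx points at the next pair's
		 * Rx queue, and the last Tx cause terminates the list with
		 * I40E_QUEUE_END_OF_LIST.
		 */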
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
			u32 val;

			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_TX <<
			       I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			if (has_xdp) {
				val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
				      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
				      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
				      (qp << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
				      (I40E_QUEUE_TYPE_TX <<
				       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

				wr32(hw, I40E_QINT_TQCTL(nextqp), val);
			}

			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
			      (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp + 1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
			      (I40E_QUEUE_TYPE_RX <<
			       I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST <<
					I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: pointer to private device data structure
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
	      I40E_PFINT_ICR0_ENA_GRST_MASK |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
	     I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	u32 nextqp = i40e_enabled_xdp_vsi(vsi) ? vsi->alloc_queue_pairs : 0;
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->rx.next_update = jiffies + 1;
	q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
	q_vector->rx.current_itr = q_vector->rx.target_itr;
	q_vector->tx.next_update = jiffies + 1;
	q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
	q_vector->tx.current_itr = q_vector->tx.target_itr;

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK		       |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (nextqp	   << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	if (i40e_enabled_xdp_vsi(vsi)) {
		val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		     |
		      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
		      (I40E_QUEUE_TYPE_TX
		       << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

		wr32(hw, I40E_QINT_TQCTL(nextqp), val);
	}

	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK		       |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
/**
 * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;

	wr32(hw, I40E_PFINT_DYN_CTL0,
	     I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
	i40e_flush(hw);
}
/**
 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
 * @pf: board private structure
 **/
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
	      I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
	      (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

	wr32(hw, I40E_PFINT_DYN_CTL0, val);
	i40e_flush(hw);
}
/**
 * i40e_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * i40e_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void i40e_irq_affinity_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	struct i40e_q_vector *q_vector =
		container_of(notify, struct i40e_q_vector, affinity_notify);

	cpumask_copy(&q_vector->affinity_mask, mask);
}
/**
 * i40e_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void i40e_irq_affinity_release(struct kref *ref) {}
/**
 * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
 * @vsi: the VSI being configured
 * @basename: name for the vector
 *
 * Allocates MSI-X vectors and requests interrupts from the kernel.
 **/
static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
{
	int q_vectors = vsi->num_q_vectors;
	struct i40e_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	int vector, err;
	int irq_num;
	int cpu;

	for (vector = 0; vector < q_vectors; vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[vector];

		irq_num = pf->msix_entries[base + vector].vector;

		if (q_vector->tx.ring && q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "TxRx", rx_int_idx++);
			tx_int_idx++;
		} else if (q_vector->rx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "rx", rx_int_idx++);
		} else if (q_vector->tx.ring) {
			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
				 "%s-%s-%d", basename, "tx", tx_int_idx++);
		} else {
			/* skip this unused q_vector */
			continue;
		}
		err = request_irq(irq_num,
				  vsi->irq_handler,
				  0,
				  q_vector->name,
				  q_vector);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "MSIX request_irq failed, error: %d\n", err);
			goto free_queue_irqs;
		}

		/* register for affinity change notifications */
		q_vector->affinity_notify.notify = i40e_irq_affinity_notify;
		q_vector->affinity_notify.release = i40e_irq_affinity_release;
		irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
		/* Spread affinity hints out across online CPUs.
		 *
		 * get_cpu_mask returns a static constant mask with
		 * a permanent lifetime so it's ok to pass to
		 * irq_set_affinity_hint without making a copy.
		 */
		cpu = cpumask_local_spread(q_vector->v_idx, -1);
		irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
	}

	vsi->irqs_ready = true;
	return 0;

free_queue_irqs:
	while (vector) {
		vector--;
		irq_num = pf->msix_entries[base + vector].vector;
		irq_set_affinity_notifier(irq_num, NULL);
		irq_set_affinity_hint(irq_num, NULL);
		free_irq(irq_num, &vsi->q_vectors[vector]);
	}
	return err;
}
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable interrupt causation from each queue */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u32 val;

		val = rd32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx));
		val &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), val);

		val = rd32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx));
		val &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), val);

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;
		wr32(hw, I40E_QINT_TQCTL(vsi->xdp_rings[i]->reg_idx), 0);
	}

	/* disable each interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
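		/* I40E_PFINT_DYN_CTLN is indexed by (vector - 1) because
		 * MSI-X vector 0 is the misc vector and is controlled by
		 * I40E_PFINT_DYN_CTL0 instead.
		 */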
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
/**
 * i40e_vsi_enable_irq - Enable IRQ for the given VSI
 * @vsi: the VSI being configured
 **/
static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_irq_dynamic_enable(vsi, i);
	} else {
		i40e_irq_dynamic_enable_icr0(pf);
	}

	i40e_flush(&pf->hw);
	return 0;
}
/**
 * i40e_free_misc_vector - Free the vector that handles non-queue events
 * @pf: board private structure
 **/
static void i40e_free_misc_vector(struct i40e_pf *pf)
{
	/* Disable ICR 0 */
	wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
	i40e_flush(&pf->hw);

	if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
		synchronize_irq(pf->msix_entries[0].vector);
		free_irq(pf->msix_entries[0].vector, pf);
		clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
	}
}
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts. This is also used in
 * MSIX mode to handle the non-queue interrupts.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
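
	/* ICR0 is read-to-clear; every cause consumed below is also knocked
	 * out of ena_mask so that only still-enabled causes are re-armed
	 * when the mask is written back at the end of the handler.
	 */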
3874 /* if sharing a legacy IRQ, we might get called w/o an intr pending */
3875 if ((icr0
& I40E_PFINT_ICR0_INTEVENT_MASK
) == 0)
3878 /* if interrupt but no bits showing, must be SWINT */
3879 if (((icr0
& ~I40E_PFINT_ICR0_INTEVENT_MASK
) == 0) ||
3880 (icr0
& I40E_PFINT_ICR0_SWINT_MASK
))
3883 if ((pf
->flags
& I40E_FLAG_IWARP_ENABLED
) &&
3884 (icr0
& I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
)) {
3885 ena_mask
&= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
;
3886 dev_dbg(&pf
->pdev
->dev
, "cleared PE_CRITERR\n");
3887 set_bit(__I40E_CORE_RESET_REQUESTED
, pf
->state
);
3890 /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
3891 if (icr0
& I40E_PFINT_ICR0_QUEUE_0_MASK
) {
3892 struct i40e_vsi
*vsi
= pf
->vsi
[pf
->lan_vsi
];
3893 struct i40e_q_vector
*q_vector
= vsi
->q_vectors
[0];
3895 /* We do not have a way to disarm Queue causes while leaving
3896 * interrupt enabled for all other causes, ideally
3897 * interrupt should be disabled while we are in NAPI but
3898 * this is not a performance path and napi_schedule()
3899 * can deal with rescheduling.
3901 if (!test_bit(__I40E_DOWN
, pf
->state
))
3902 napi_schedule_irqoff(&q_vector
->napi
);
3905 if (icr0
& I40E_PFINT_ICR0_ADMINQ_MASK
) {
3906 ena_mask
&= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK
;
3907 set_bit(__I40E_ADMINQ_EVENT_PENDING
, pf
->state
);
3908 i40e_debug(&pf
->hw
, I40E_DEBUG_NVM
, "AdminQ event\n");
3911 if (icr0
& I40E_PFINT_ICR0_MAL_DETECT_MASK
) {
3912 ena_mask
&= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
;
3913 set_bit(__I40E_MDD_EVENT_PENDING
, pf
->state
);
3916 if (icr0
& I40E_PFINT_ICR0_VFLR_MASK
) {
3917 ena_mask
&= ~I40E_PFINT_ICR0_ENA_VFLR_MASK
;
3918 set_bit(__I40E_VFLR_EVENT_PENDING
, pf
->state
);
3921 if (icr0
& I40E_PFINT_ICR0_GRST_MASK
) {
3922 if (!test_bit(__I40E_RESET_RECOVERY_PENDING
, pf
->state
))
3923 set_bit(__I40E_RESET_INTR_RECEIVED
, pf
->state
);
3924 ena_mask
&= ~I40E_PFINT_ICR0_ENA_GRST_MASK
;
3925 val
= rd32(hw
, I40E_GLGEN_RSTAT
);
3926 val
= (val
& I40E_GLGEN_RSTAT_RESET_TYPE_MASK
)
3927 >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT
;
3928 if (val
== I40E_RESET_CORER
) {
3930 } else if (val
== I40E_RESET_GLOBR
) {
3932 } else if (val
== I40E_RESET_EMPR
) {
3934 set_bit(__I40E_EMP_RESET_INTR_RECEIVED
, pf
->state
);
3938 if (icr0
& I40E_PFINT_ICR0_HMC_ERR_MASK
) {
3939 icr0
&= ~I40E_PFINT_ICR0_HMC_ERR_MASK
;
3940 dev_info(&pf
->pdev
->dev
, "HMC error interrupt\n");
3941 dev_info(&pf
->pdev
->dev
, "HMC error info 0x%x, HMC error data 0x%x\n",
3942 rd32(hw
, I40E_PFHMC_ERRORINFO
),
3943 rd32(hw
, I40E_PFHMC_ERRORDATA
));
3946 if (icr0
& I40E_PFINT_ICR0_TIMESYNC_MASK
) {
3947 u32 prttsyn_stat
= rd32(hw
, I40E_PRTTSYN_STAT_0
);
3949 if (prttsyn_stat
& I40E_PRTTSYN_STAT_0_TXTIME_MASK
) {
3950 icr0
&= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK
;
3951 i40e_ptp_tx_hwtstamp(pf
);
3955 /* If a critical error is pending we have no choice but to reset the
3957 * Report and mask out any remaining unexpected interrupts.
3959 icr0_remaining
= icr0
& ena_mask
;
3960 if (icr0_remaining
) {
3961 dev_info(&pf
->pdev
->dev
, "unhandled interrupt icr0=0x%08x\n",
3963 if ((icr0_remaining
& I40E_PFINT_ICR0_PE_CRITERR_MASK
) ||
3964 (icr0_remaining
& I40E_PFINT_ICR0_PCI_EXCEPTION_MASK
) ||
3965 (icr0_remaining
& I40E_PFINT_ICR0_ECC_ERR_MASK
)) {
3966 dev_info(&pf
->pdev
->dev
, "device will be reset\n");
3967 set_bit(__I40E_PF_RESET_REQUESTED
, pf
->state
);
3968 i40e_service_event_schedule(pf
);
3970 ena_mask
&= ~icr0_remaining
;
3975 /* re-enable interrupt causes */
3976 wr32(hw
, I40E_PFINT_ICR0_ENA
, ena_mask
);
3977 if (!test_bit(__I40E_DOWN
, pf
->state
)) {
3978 i40e_service_event_schedule(pf
);
3979 i40e_irq_dynamic_enable_icr0(pf
);
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
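/* Note on the index arithmetic in i40e_clean_fdir_tx_irq(): 'i' is biased
 * by -tx_ring->count so the ring-wrap test after each increment is simply
 * 'if (unlikely(!i))'; the bias is removed again ('i += tx_ring->count')
 * before next_to_clean is written back.
 */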
/**
 * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
{
	struct i40e_q_vector *q_vector = data;
	struct i40e_vsi *vsi;

	if (!q_vector->tx.ring)
		return IRQ_HANDLED;

	vsi = q_vector->tx.ring->vsi;
	i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);

	return IRQ_HANDLED;
}
/**
 * i40e_map_vector_to_qp - Assigns the queue pair to the vector
 * @vsi: the VSI being configured
 * @v_idx: vector index
 * @qp_idx: queue pair index
 **/
static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
	struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];

	tx_ring->q_vector = q_vector;
	tx_ring->next = q_vector->tx.ring;
	q_vector->tx.ring = tx_ring;
	q_vector->tx.count++;

	/* Place XDP Tx ring in the same q_vector ring list as regular Tx */
	if (i40e_enabled_xdp_vsi(vsi)) {
		struct i40e_ring *xdp_ring = vsi->xdp_rings[qp_idx];

		xdp_ring->q_vector = q_vector;
		xdp_ring->next = q_vector->tx.ring;
		q_vector->tx.ring = xdp_ring;
		q_vector->tx.count++;
	}

	rx_ring->q_vector = q_vector;
	rx_ring->next = q_vector->rx.ring;
	q_vector->rx.ring = rx_ring;
	q_vector->rx.count++;
}
/**
 * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
 * @vsi: the VSI being configured
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per queue pair, but on a constrained vector budget, we
 * group the queue pairs as "efficiently" as possible.
 **/
static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
{
	int qp_remaining = vsi->num_queue_pairs;
	int q_vectors = vsi->num_q_vectors;
	int num_ringpairs;
	int v_start = 0;
	int qp_idx = 0;

	/* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
	 * group them so there are multiple queues per vector.
	 * It is also important to go through all the vectors available to be
	 * sure that if we don't use all the vectors, that the remaining vectors
	 * are cleared. This is especially important when decreasing the
	 * number of queues in use.
	 */
	for (; v_start < q_vectors; v_start++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];

		num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);

		q_vector->num_ringpairs = num_ringpairs;
		q_vector->reg_idx = q_vector->v_idx + vsi->base_vector - 1;

		q_vector->rx.count = 0;
		q_vector->tx.count = 0;
		q_vector->rx.ring = NULL;
		q_vector->tx.ring = NULL;

		while (num_ringpairs--) {
			i40e_map_vector_to_qp(vsi, v_start, qp_idx);
			qp_idx++;
			qp_remaining--;
		}
	}
}
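/* A worked example for i40e_vsi_map_rings_to_vectors(): with 10 queue
 * pairs and 4 vectors, DIV_ROUND_UP() hands out 3, 3, 2 and 2 ring pairs
 * to vectors 0-3 respectively, draining qp_remaining to exactly zero at
 * the last vector.
 */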
/**
 * i40e_vsi_request_irq - Request IRQ from the OS
 * @vsi: the VSI being configured
 * @basename: name for the vector
 **/
static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		err = i40e_vsi_request_irq_msix(vsi, basename);
	else if (pf->flags & I40E_FLAG_MSI_ENABLED)
		err = request_irq(pf->pdev->irq, i40e_intr, 0,
				  pf->int_name, pf);
	else
		err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
				  pf->int_name, pf);

	if (err)
		dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);

	return err;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
static void i40e_netpoll(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		for (i = 0; i < vsi->num_q_vectors; i++)
			i40e_msix_clean_rings(0, vsi->q_vectors[i]);
	} else {
		i40e_intr(pf->pdev->irq, netdev);
	}
}
#endif
#define I40E_QTX_ENA_WAIT_COUNT 50

/**
 * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Tx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 tx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
/**
 * i40e_control_tx_q - Start or stop a particular Tx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that any delay
 * required after the operation is expected to be handled by the caller of
 * this function.
 **/
static void i40e_control_tx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 tx_reg;
	int i;

	/* warn the TX unit of coming changes */
	i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
	if (!enable)
		usleep_range(10, 20);

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
		if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable) {
		wr32(hw, I40E_QTX_HEAD(pf_q), 0);
		tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
	} else {
		tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
	}

	wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
}
/**
 * i40e_control_wait_tx_q - Start/stop Tx queue and wait for completion
 * @seid: VSI SEID
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @is_xdp: true if the queue is used for XDP
 * @enable: start or stop the queue
 **/
int i40e_control_wait_tx_q(int seid, struct i40e_pf *pf, int pf_q,
			   bool is_xdp, bool enable)
{
	int ret;

	i40e_control_tx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	ret = i40e_pf_txq_wait(pf, pf_q, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d %sTx ring %d %sable timeout\n",
			 seid, (is_xdp ? "XDP " : ""), pf_q,
			 (enable ? "en" : "dis"));
	}

	return ret;
}
/**
 * i40e_vsi_control_tx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q,
					     false /*is xdp*/, enable);
		if (ret)
			break;

		if (!i40e_enabled_xdp_vsi(vsi))
			continue;

		ret = i40e_control_wait_tx_q(vsi->seid, pf,
					     pf_q + vsi->alloc_queue_pairs,
					     true /*is xdp*/, enable);
		if (ret)
			break;
	}
	return ret;
}
/**
 * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
 * @pf: the PF being configured
 * @pf_q: the PF queue
 * @enable: enable or disable state of the queue
 *
 * This routine will wait for the given Rx queue of the PF to reach the
 * enabled or disabled state.
 * Returns -ETIMEDOUT in case of failing to reach the requested state after
 * multiple retries; else will return 0 in case of success.
 **/
static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
{
	int i;
	u32 rx_reg;

	for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
		rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			break;

		usleep_range(10, 20);
	}
	if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
		return -ETIMEDOUT;

	return 0;
}
/**
 * i40e_control_rx_q - Start or stop a particular Rx queue
 * @pf: the PF structure
 * @pf_q: the PF queue to configure
 * @enable: start or stop the queue
 *
 * This function enables or disables a single queue. Note that
 * any delay required after the operation is expected to be
 * handled by the caller of this function.
 **/
static void i40e_control_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	struct i40e_hw *hw = &pf->hw;
	u32 rx_reg;
	int i;

	for (i = 0; i < I40E_QTX_ENA_WAIT_COUNT; i++) {
		rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
		if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
		    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
			break;
		usleep_range(1000, 2000);
	}

	/* Skip if the queue is already in the requested state */
	if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
		return;

	/* turn on/off the queue */
	if (enable)
		rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
	else
		rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;

	wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
}
/**
 * i40e_control_wait_rx_q
 * @pf: the PF structure
 * @pf_q: queue being configured
 * @enable: start or stop the rings
 *
 * This function enables or disables a single queue along with waiting
 * for the change to finish. The caller of this function should handle
 * the delays needed in the case of disabling queues.
 **/
int i40e_control_wait_rx_q(struct i40e_pf *pf, int pf_q, bool enable)
{
	int ret = 0;

	i40e_control_rx_q(pf, pf_q, enable);

	/* wait for the change to finish */
	ret = i40e_pf_rxq_wait(pf, pf_q, enable);
	if (ret)
		return ret;

	return ret;
}
/**
 * i40e_vsi_control_rx - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret = 0;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		ret = i40e_control_wait_rx_q(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* Due to HW errata, on Rx disable only, the register can indicate done
	 * before it really is. Needs 50ms to be sure
	 */
	if (!enable)
		mdelay(50);

	return ret;
}
/**
 * i40e_vsi_start_rings - Start a VSI's rings
 * @vsi: the VSI being configured
 **/
int i40e_vsi_start_rings(struct i40e_vsi *vsi)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	ret = i40e_vsi_control_rx(vsi, true);
	if (ret)
		return ret;
	ret = i40e_vsi_control_tx(vsi, true);

	return ret;
}
/**
 * i40e_vsi_stop_rings - Stop a VSI's rings
 * @vsi: the VSI being configured
 **/
void i40e_vsi_stop_rings(struct i40e_vsi *vsi)
{
	/* When port TX is suspended, don't wait */
	if (test_bit(__I40E_PORT_SUSPENDED, vsi->back->state))
		return i40e_vsi_stop_rings_no_wait(vsi);

	/* do rx first for enable and last for disable
	 * Ignore return value, we need to shutdown whatever we can
	 */
	i40e_vsi_control_tx(vsi, false);
	i40e_vsi_control_rx(vsi, false);
}
/**
 * i40e_vsi_stop_rings_no_wait - Stop a VSI's rings and do not delay
 * @vsi: the VSI being shutdown
 *
 * This function stops all the rings for a VSI but does not delay to verify
 * that rings have been disabled. It is expected that the caller is shutting
 * down multiple VSIs at once and will delay together for all the VSIs after
 * initiating the shutdown. This is particularly useful for shutting down lots
 * of VFs together. Otherwise, a large delay can be incurred while configuring
 * each VSI in serial.
 **/
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		i40e_control_tx_q(pf, pf_q, false);
		i40e_control_rx_q(pf, pf_q, false);
	}
}
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			int irq_num;
			u16 vector;

			vector = i + base;
			irq_num = pf->msix_entries[vector].vector;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity notifier in the IRQ descriptor */
			irq_set_affinity_notifier(irq_num, NULL);
			/* remove our suggested affinity mask for this IRQ */
			irq_set_affinity_hint(irq_num, NULL);
			synchronize_irq(irq_num);
			free_irq(irq_num, vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);

				qp = next;
			}
		}
	} else {
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
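/* The teardown in i40e_vsi_free_irq() walks the hardware interrupt linked
 * list: PFINT_LNKLSTN (PFINT_LNKLST0 in MSI/legacy mode) points at the
 * first queue, each QINT_TQCTL.NEXTQ_INDX points at the next queue pair,
 * and the I40E_QUEUE_END_OF_LIST sentinel terminates the chain.
 */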
/**
 * i40e_free_q_vector - Free memory allocated for specific interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
	struct i40e_ring *ring;

	if (!q_vector)
		return;

	/* disassociate q_vector from rings */
	i40e_for_each_ring(ring, q_vector->tx)
		ring->q_vector = NULL;

	i40e_for_each_ring(ring, q_vector->rx)
		ring->q_vector = NULL;

	/* only VSI w/ an associated netdev is set up w/ NAPI */
	if (vsi->netdev)
		netif_napi_del(&q_vector->napi);

	vsi->q_vectors[v_idx] = NULL;

	kfree_rcu(q_vector, rcu);
}
/**
 * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
 * @vsi: the VSI being un-configured
 *
 * This frees the memory allocated to the q_vectors and
 * deletes references to the NAPI struct.
 **/
static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
{
	int v_idx;

	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
		i40e_free_q_vector(vsi, v_idx);
}
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
/**
 * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @pf: board private structure
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
{
	int i;

	i40e_free_misc_vector(pf);

	i40e_put_lump(pf->irq_pile, pf->iwarp_base_vector,
		      I40E_IWARP_IRQ_PILE_ID);

	i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	i40e_reset_interrupt_capability(pf);
}
/**
 * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_enable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_enable(&q_vector->napi);
	}
}
/**
 * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 **/
static void i40e_napi_disable_all(struct i40e_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];

		if (q_vector->rx.ring || q_vector->tx.ring)
			napi_disable(&q_vector->napi);
	}
}
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	if (!test_and_set_bit(__I40E_VSI_DOWN, vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		set_bit(__I40E_CLIENT_RESET, pf->state);
}
/**
 * i40e_quiesce_vsi - Pause a given VSI
 * @vsi: the VSI being paused
 **/
static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
{
	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	set_bit(__I40E_VSI_NEEDS_RESTART, vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
	else
		i40e_vsi_close(vsi);
}
/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_and_clear_bit(__I40E_VSI_NEEDS_RESTART, vsi->state))
		return;

	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}
/**
 * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_quiesce_vsi(pf->vsi[v]);
	}
}
/**
 * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
 * @pf: the PF
 **/
static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
	int v;

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			i40e_unquiesce_vsi(pf->vsi[v]);
	}
}
/**
 * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
 * @vsi: the VSI being configured
 *
 * Wait until all queues on a given VSI have been disabled.
 **/
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int i, pf_q, ret;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* Check and wait for the Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}

		if (!i40e_enabled_xdp_vsi(vsi))
			goto wait_rx;

		/* Check and wait for the XDP Tx queue */
		ret = i40e_pf_txq_wait(pf, pf_q + vsi->alloc_queue_pairs,
				       false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d XDP Tx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
wait_rx:
		/* Check and wait for the Rx queue */
		ret = i40e_pf_rxq_wait(pf, pf_q, false);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d disable timeout\n",
				 vsi->seid, pf_q);
			return ret;
		}
	}

	return 0;
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
 * @pf: the PF
 *
 * This function waits for the queues to be in disabled state for all the
 * VSIs that are managed by this PF.
 **/
static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{
	int v, ret = 0;

	for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
		if (pf->vsi[v]) {
			ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
			if (ret)
				break;
		}
	}

	return ret;
}

#endif
/**
 * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
 * @pf: pointer to PF
 *
 * Get TC map for ISCSI PF type that will include iSCSI TC
 * and LAN TC.
 **/
static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
{
	struct i40e_dcb_app_priority_table app;
	struct i40e_hw *hw = &pf->hw;
	u8 enabled_tc = 1; /* TC0 is always enabled */
	u8 tc, i;
	/* Get the iSCSI APP TLV */
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	for (i = 0; i < dcbcfg->numapps; i++) {
		app = dcbcfg->app[i];
		if (app.selector == I40E_APP_SEL_TCPIP &&
		    app.protocolid == I40E_APP_PROTOID_ISCSI) {
			tc = dcbcfg->etscfg.prioritytable[app.priority];
			enabled_tc |= BIT(tc);
			break;
		}
	}

	return enabled_tc;
}
/**
 * i40e_dcb_get_num_tc - Get the number of TCs from DCBx config
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Return the number of TCs from given DCBx configuration
 **/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
	int i, tc_unused = 0;
	u8 num_tc = 0;
	u8 ret = 0;

	/* Scan the ETS Config Priority Table to find
	 * traffic class enabled for a given priority
	 * and create a bitmask of enabled TCs
	 */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
		num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);

	/* Now scan the bitmask to check for
	 * contiguous TCs starting with TC0
	 */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TC - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = 1;
		}
	}

	/* There is always at least TC0 */
	if (!ret)
		ret = 1;

	return ret;
}
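/* Example: a priority table that maps user priorities only to TC0 and TC1
 * yields num_tc = 0x3 and i40e_dcb_get_num_tc() returns 2; a table mapping
 * to TC0 and TC2 (0x5) is non-contiguous, so DCB is disabled and 1 is
 * returned.
 */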
/**
 * i40e_dcb_get_enabled_tc - Get enabled traffic classes
 * @dcbcfg: the corresponding DCBx configuration structure
 *
 * Query the current DCB configuration and return the number of
 * traffic classes enabled from the given DCBX config
 **/
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
{
	u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
	u8 enabled_tc = 1;
	u8 i;

	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	return enabled_tc;
}
/**
 * i40e_mqprio_get_enabled_tc - Get enabled traffic classes
 * @pf: PF being queried
 *
 * Query the current MQPRIO configuration and return the number of
 * traffic classes enabled.
 **/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
	u8 enabled_tc = 1, i;

	for (i = 1; i < num_tc; i++)
		enabled_tc |= BIT(i);
	return enabled_tc;
}
/**
 * i40e_pf_get_num_tc - Get enabled traffic classes for PF
 * @pf: PF being queried
 *
 * Return number of traffic classes enabled for the given PF
 **/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 i, enabled_tc = 1;
	u8 num_tc = 0;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;

	/* If neither MQPRIO nor DCB is enabled, then always use single TC */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return 1;

	/* SFP mode will be enabled for all TCs on port */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_num_tc(dcbcfg);

	/* MFP mode return count of enabled TCs for this PF */
	if (pf->hw.func_caps.iscsi)
		enabled_tc = i40e_get_iscsi_tc_map(pf);
	else
		return 1; /* Only TC0 */

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			num_tc++;
	}
	return num_tc;
}
/**
 * i40e_pf_get_tc_map - Get bitmap for enabled traffic classes
 * @pf: PF being queried
 *
 * Return a bitmap for enabled traffic classes for this PF.
 **/
static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
{
	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return i40e_mqprio_get_enabled_tc(pf);

	/* If neither MQPRIO nor DCB is enabled for this PF then just return
	 * default TC
	 */
	if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
		return I40E_DEFAULT_TRAFFIC_CLASS;

	/* SFP mode we want PF to be enabled for all TCs */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);

	/* MFP enabled and iSCSI PF type */
	if (pf->hw.func_caps.iscsi)
		return i40e_get_iscsi_tc_map(pf);
	else
		return I40E_DEFAULT_TRAFFIC_CLASS;
}
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
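/* tc_bw_max above packs eight 4-bit max-quanta fields into two le16 words;
 * e.g. with tc_bw_max[0] = 0x4321, TC0..TC3 decode to 1, 2, 3 and 4 (only
 * the low 3 bits of each nibble are kept).
 */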
/**
 * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
 * @vsi: the VSI being configured
 * @enabled_tc: TC bitmap
 * @bw_share: BW shared credits per TC
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
				       u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	struct i40e_pf *pf = vsi->back;
	i40e_status ret;
	int i;

	/* There is no need to reset BW when mqprio mode is on.  */
	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return 0;
	if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
		ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
		if (ret)
			dev_info(&pf->pdev->dev,
				 "Failed to reset tx rate for vsi->seid %u\n",
				 vsi->seid);
		return ret;
	}
	bw_data.tc_valid_bits = enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&pf->hw, vsi->seid, &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "AQ command Config VSI BW allocation per TC failed = %d\n",
			 pf->hw.aq.asq_last_status);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		vsi->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
/**
 * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
 * @vsi: the VSI being configured
 * @enabled_tc: TC map to be enabled
 *
 **/
static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u8 netdev_tc = 0;
	int i;
	struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;

	if (!netdev)
		return;

	if (!enabled_tc) {
		netdev_reset_tc(netdev);
		return;
	}

	/* Set up actual enabled TCs on the VSI */
	if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
		return;

	/* set per TC queues for the VSI */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* Only set TC queues for enabled tcs
		 *
		 * e.g. For a VSI that has TC0 and TC3 enabled the
		 * enabled_tc bitmap would be 0x00001001; the driver
		 * will set the numtc for netdev as 2 that will be
		 * referenced by the netdev layer as TC 0 and 1.
		 */
		if (vsi->tc_config.enabled_tc & BIT(i))
			netdev_set_tc_queue(netdev,
					vsi->tc_config.tc_info[i].netdev_tc,
					vsi->tc_config.tc_info[i].qcount,
					vsi->tc_config.tc_info[i].qoffset);
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO)
		return;

	/* Assign UP2TC map for the VSI */
	for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
		/* Get the actual TC# for the UP */
		u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
		/* Get the mapped netdev TC# for the UP */
		netdev_tc = vsi->tc_config.tc_info[ets_tc].netdev_tc;
		netdev_set_prio_tc_map(netdev, i, netdev_tc);
	}
}
/**
 * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
 * @vsi: the VSI being configured
 * @ctxt: the ctxt buffer returned from AQ VSI update param command
 **/
static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
				      struct i40e_vsi_context *ctxt)
{
	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	vsi->info.mapping_flags = ctxt->info.mapping_flags;
	memcpy(&vsi->info.queue_mapping,
	       &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
	memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Check if enabled_tc is same as existing or new TCs */
	if (vsi->tc_config.enabled_tc == enabled_tc &&
	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};

		dev_info(&pf->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid,
						  &bw_config, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed querying vsi bw info, err %s aq_err %s\n",
				 i40e_stat_str(hw, ret),
				 i40e_aq_str(hw, hw->aq.asq_last_status));
			goto out;
		}
		if ((bw_config.tc_valid_bits & enabled_tc) != enabled_tc) {
			u8 valid_tc = bw_config.tc_valid_bits & enabled_tc;

			if (!valid_tc)
				valid_tc = bw_config.tc_valid_bits;
			/* Always enable TC0, no matter what */
			valid_tc |= 1;
			dev_info(&pf->pdev->dev,
				 "Requested tc 0x%x, but FW reports 0x%x as valid. Attempting to use 0x%x.\n",
				 enabled_tc, bw_config.tc_valid_bits, valid_tc);
			enabled_tc = valid_tc;
		}

		ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"Unable to configure TC map %d for VSI %d\n",
				enabled_tc, vsi->seid);
			goto out;
		}
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	if (vsi->back->flags & I40E_FLAG_TC_MQPRIO) {
		ret = i40e_vsi_setup_queue_map_mqprio(vsi, &ctxt, enabled_tc);
		if (ret)
			goto out;
	} else {
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
	}

	/* On destroying the qdisc, reset vsi->rss_size, as number of enabled
	 * queues changed.
	 */
	if (!vsi->mqprio_qopt.qopt.hw && vsi->reconfig_rss) {
		vsi->rss_size = min_t(int, vsi->back->alloc_rss_size,
				      vsi->num_queue_pairs);
		ret = i40e_vsi_config_rss(vsi);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reconfig rss for num_queues\n");
			return ret;
		}
		vsi->reconfig_rss = false;
	}
	if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
		ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
		ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
	}

	/* Update the VSI after updating the VSI queue-mapping
	 * information
	 */
	ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Update current VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
/**
 * i40e_get_link_speed - Returns link speed for the interface
 * @vsi: VSI to be configured
 *
 **/
static int i40e_get_link_speed(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		return 40000;
	case I40E_LINK_SPEED_25GB:
		return 25000;
	case I40E_LINK_SPEED_20GB:
		return 20000;
	case I40E_LINK_SPEED_10GB:
		return 10000;
	case I40E_LINK_SPEED_1GB:
		return 1000;
	default:
		return -EINVAL;
	}
}
/**
 * i40e_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
 * @vsi: VSI to be configured
 * @seid: seid of the channel/VSI
 * @max_tx_rate: max TX rate to be configured as BW limit
 *
 * Helper function to set BW limit for a given VSI
 **/
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate)
{
	struct i40e_pf *pf = vsi->back;
	u64 credits = 0;
	int speed = 0;
	int ret = 0;

	speed = i40e_get_link_speed(vsi);
	if (max_tx_rate > speed) {
		dev_err(&pf->pdev->dev,
			"Invalid max tx rate %llu specified for VSI seid %d.",
			max_tx_rate, seid);
		return -EINVAL;
	}
	if (max_tx_rate && max_tx_rate < 50) {
		dev_warn(&pf->pdev->dev,
			 "Setting max tx rate to minimum usable value of 50Mbps.\n");
		max_tx_rate = 50;
	}

	/* Tx rate credits are in values of 50Mbps, 0 is disabled */
	credits = max_tx_rate;
	do_div(credits, I40E_BW_CREDIT_DIVISOR);
	ret = i40e_aq_config_vsi_bw_limit(&pf->hw, seid, credits,
					  I40E_MAX_BW_INACTIVE_ACCUM, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed set tx rate (%llu Mbps) for vsi->seid %u, err %s aq_err %s\n",
			max_tx_rate, seid, i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	return ret;
}
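/* Example: max_tx_rate = 5000 Mbps becomes 5000 / I40E_BW_CREDIT_DIVISOR
 * (50 Mbps per credit) = 100 scheduler credits in the AQ command above.
 */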
/**
 * i40e_remove_queue_channels - Remove queue channels for the TCs
 * @vsi: VSI to be configured
 *
 * Remove queue channels for the TCs
 **/
static void i40e_remove_queue_channels(struct i40e_vsi *vsi)
{
	enum i40e_admin_queue_err last_aq_status;
	struct i40e_cloud_filter *cfilter;
	struct i40e_channel *ch, *ch_tmp;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	int ret, i;

	/* Reset rss size that was stored when reconfiguring rss for
	 * channel VSIs with non-power-of-2 queue count.
	 */
	vsi->current_rss_size = 0;

	/* perform cleanup for channels if they exist */
	if (list_empty(&vsi->ch_list))
		return;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		struct i40e_vsi *p_vsi;

		list_del(&ch->list);
		p_vsi = ch->parent_vsi;
		if (!p_vsi || !ch->initialized) {
			kfree(ch);
			continue;
		}
		/* Reset queue contexts */
		for (i = 0; i < ch->num_queue_pairs; i++) {
			struct i40e_ring *tx_ring, *rx_ring;
			u16 pf_q;

			pf_q = ch->base_queue + i;
			tx_ring = vsi->tx_rings[pf_q];
			tx_ring->ch = NULL;

			rx_ring = vsi->rx_rings[pf_q];
			rx_ring->ch = NULL;
		}

		/* Reset BW configured for this VSI via mqprio */
		ret = i40e_set_bw_limit(vsi, ch->seid, 0);
		if (ret)
			dev_info(&vsi->back->pdev->dev,
				 "Failed to reset tx rate for ch->seid %u\n",
				 ch->seid);

		/* delete cloud filters associated with this channel */
		hlist_for_each_entry_safe(cfilter, node,
					  &pf->cloud_filter_list, cloud_node) {
			if (cfilter->seid != ch->seid)
				continue;

			hash_del(&cfilter->cloud_node);
			if (cfilter->dst_port)
				ret = i40e_add_del_cloud_filter_big_buf(vsi,
									cfilter,
									false);
			else
				ret = i40e_add_del_cloud_filter(vsi, cfilter,
								false);
			last_aq_status = pf->hw.aq.asq_last_status;
			if (ret)
				dev_info(&pf->pdev->dev,
					 "Failed to delete cloud filter, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw, last_aq_status));
			kfree(cfilter);
		}

		/* delete VSI from FW */
		ret = i40e_aq_delete_element(&vsi->back->hw, ch->seid,
					     NULL);
		if (ret)
			dev_err(&vsi->back->pdev->dev,
				"unable to remove channel (%d) for parent VSI(%d)\n",
				ch->seid, p_vsi->seid);
		kfree(ch);
	}
	INIT_LIST_HEAD(&vsi->ch_list);
}
/**
 * i40e_is_any_channel - channel exist or not
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Returns true or false if channel(s) exist for associated VSI or not
 **/
static bool i40e_is_any_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (ch->initialized)
			return true;
	}

	return false;
}
/**
 * i40e_get_max_queues_for_channel
 * @vsi: ptr to VSI to which channels are associated with
 *
 * Helper function which returns max value among the queue counts set on the
 * channels/TCs created.
 **/
static int i40e_get_max_queues_for_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	int max = 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			continue;
		if (ch->num_queue_pairs > max)
			max = ch->num_queue_pairs;
	}

	return max;
}
/**
 * i40e_validate_num_queues - validate num_queues w.r.t channel
 * @pf: ptr to PF device
 * @num_queues: number of queues
 * @vsi: the parent VSI
 * @reconfig_rss: indicates should the RSS be reconfigured or not
 *
 * This function validates number of queues in the context of new channel
 * which is being established and determines if RSS should be reconfigured
 * or not for parent VSI.
 **/
static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues,
				    struct i40e_vsi *vsi, bool *reconfig_rss)
{
	int max_ch_queues;

	if (!reconfig_rss)
		return -EINVAL;

	*reconfig_rss = false;
	if (vsi->current_rss_size) {
		if (num_queues > vsi->current_rss_size) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) > vsi's current_size(%d)\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		} else if ((num_queues < vsi->current_rss_size) &&
			   (!is_power_of_2(num_queues))) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < vsi's current_size(%d), but not power of 2\n",
				num_queues, vsi->current_rss_size);
			return -EINVAL;
		}
	}

	if (!is_power_of_2(num_queues)) {
		/* Find the max num_queues configured for channel if channel
		 * exist.
		 * if channel exist, then enforce 'num_queues' to be more than
		 * max ever queues configured for channel.
		 */
		max_ch_queues = i40e_get_max_queues_for_channel(vsi);
		if (num_queues < max_ch_queues) {
			dev_dbg(&pf->pdev->dev,
				"Error: num_queues (%d) < max queues configured for channel(%d)\n",
				num_queues, max_ch_queues);
			return -EINVAL;
		}
		*reconfig_rss = true;
	}

	return 0;
}
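/* Example: with current_rss_size = 8, a request for 10 queues fails
 * (larger than the current size), 6 fails (smaller but not a power of 2),
 * and 4 passes without touching RSS. With current_rss_size = 0, a request
 * for 6 passes but sets *reconfig_rss so the parent VSI's LUT is rebuilt,
 * provided no existing channel already uses more queues.
 */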
/**
 * i40e_vsi_reconfig_rss - reconfig RSS based on specified rss_size
 * @vsi: the VSI being setup
 * @rss_size: size of RSS, accordingly LUT gets reprogrammed
 *
 * This function reconfigures RSS by reprogramming LUTs using 'rss_size'
 **/
static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
{
	struct i40e_pf *pf = vsi->back;
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	struct i40e_hw *hw = &pf->hw;
	int local_rss_size;
	u8 *lut;
	int ret;

	if (!vsi->rss_size)
		return -EINVAL;

	if (rss_size > vsi->rss_size)
		return -EINVAL;

	local_rss_size = min_t(int, vsi->rss_size, rss_size);
	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Ignoring user configured lut if there is one */
	i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, local_rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);

	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot set RSS lut, err %s aq_err %s\n",
			 i40e_stat_str(hw, ret),
			 i40e_aq_str(hw, hw->aq.asq_last_status));
		kfree(lut);
		return ret;
	}
	kfree(lut);

	/* Do the update w.r.t. storing rss_size */
	if (!vsi->orig_rss_size)
		vsi->orig_rss_size = vsi->rss_size;
	vsi->current_rss_size = local_rss_size;

	return ret;
}
/**
 * i40e_channel_setup_queue_map - Setup a channel queue map
 * @pf: ptr to PF device
 * @ctxt: VSI context structure
 * @ch: ptr to channel structure
 *
 * Setup queue map for a specific channel
 **/
static void i40e_channel_setup_queue_map(struct i40e_pf *pf,
					 struct i40e_vsi_context *ctxt,
					 struct i40e_channel *ch)
{
	u16 qcount, qmap, sections = 0;
	u8 offset = 0;
	int pow;

	sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	sections |= I40E_AQ_VSI_PROP_SCHED_VALID;

	qcount = min_t(int, ch->num_queue_pairs, pf->num_lan_msix);
	ch->num_queue_pairs = qcount;

	/* find the next higher power-of-2 of num queue pairs */
	pow = ilog2(qcount);
	if (!is_power_of_2(qcount))
		pow++;

	qmap = (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
		(pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);

	/* Setup queue TC[0].qmap for given VSI context */
	ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);

	ctxt->info.up_enable_bits = 0x1; /* TC0 enabled */
	ctxt->info.mapping_flags |= cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
	ctxt->info.queue_mapping[0] = cpu_to_le16(ch->base_queue);
	ctxt->info.valid_sections |= cpu_to_le16(sections);
}
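/* Example: qcount = 6 gives pow = ilog2(6) + 1 = 3, so the qmap field
 * advertises a 2^3 = 8 queue region starting at offset 0, while
 * ch->num_queue_pairs keeps the real count of 6.
 */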
/**
 * i40e_add_channel - add a channel by adding VSI
 * @pf: ptr to PF device
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @ch: ptr to channel structure
 *
 * Add a channel (VSI) using add_vsi and queue_map
 **/
static int i40e_add_channel(struct i40e_pf *pf, u16 uplink_seid,
			    struct i40e_channel *ch)
{
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	u8 enabled_tc = 0x1; /* TC0 enabled */
	int ret;

	if (ch->type != I40E_VSI_VMDQ2) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, ch->type %d\n", ch->type);
		return -EINVAL;
	}

	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.pf_num = hw->pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = uplink_seid;
	ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
	if (ch->type == I40E_VSI_VMDQ2)
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) {
		ctxt.info.valid_sections |=
		     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
		ctxt.info.switch_id =
		   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
	}

	/* Set queue map for a given VSI context */
	i40e_channel_setup_queue_map(pf, &ctxt, ch);

	/* Now time to create VSI */
	ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "add new vsi failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw,
				     pf->hw.aq.asq_last_status));
		return -ENOENT;
	}

	/* Success, update channel */
	ch->enabled_tc = enabled_tc;
	ch->seid = ctxt.seid;
	ch->vsi_number = ctxt.vsi_number;
	ch->stat_counter_idx = cpu_to_le16(ctxt.info.stat_counter_idx);

	/* copy just the sections touched not the entire info
	 * since not all sections are valid as returned by
	 * update vsi params
	 */
	ch->info.mapping_flags = ctxt.info.mapping_flags;
	memcpy(&ch->info.queue_mapping,
	       &ctxt.info.queue_mapping, sizeof(ctxt.info.queue_mapping));
	memcpy(&ch->info.tc_mapping, ctxt.info.tc_mapping,
	       sizeof(ctxt.info.tc_mapping));

	return 0;
}
/**
 * i40e_channel_config_bw - config channel BW
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @bw_share: bandwidth share per TC
 *
 * Configure BW for a given channel (VSI).
 **/
static int i40e_channel_config_bw(struct i40e_vsi *vsi, struct i40e_channel *ch,
				  u8 *bw_share)
{
	struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
	i40e_status ret;
	int i;

	bw_data.tc_valid_bits = ch->enabled_tc;
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		bw_data.tc_bw_credits[i] = bw_share[i];

	ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, ch->seid,
				       &bw_data, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Config VSI BW allocation per TC failed, aq_err: %d for new_vsi->seid %u\n",
			 vsi->back->hw.aq.asq_last_status, ch->seid);
		return -EINVAL;
	}

	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
		ch->info.qs_handle[i] = bw_data.qs_handles[i];

	return 0;
}
/**
 * i40e_channel_config_tx_ring - config TX ring associated with new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 *
 * Configure TX rings associated with channel (VSI) since queues are being
 * taken from parent VSI.
 **/
static int i40e_channel_config_tx_ring(struct i40e_pf *pf,
				       struct i40e_vsi *vsi,
				       struct i40e_channel *ch)
{
	i40e_status ret;
	int i;
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (ch->enabled_tc & BIT(i))
			bw_share[i] = 1;
	}

	/* configure BW for new VSI */
	ret = i40e_channel_config_bw(vsi, ch, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for channel (seid %u)\n",
			 ch->enabled_tc, ch->seid);
		return ret;
	}

	for (i = 0; i < ch->num_queue_pairs; i++) {
		struct i40e_ring *tx_ring, *rx_ring;
		u16 pf_q;

		pf_q = ch->base_queue + i;

		/* Get to TX ring ptr of main VSI, for re-setup TX queue
		 * context
		 */
		tx_ring = vsi->tx_rings[pf_q];
		tx_ring->ch = ch;

		/* Get the RX ring ptr */
		rx_ring = vsi->rx_rings[pf_q];
		rx_ring->ch = ch;
	}

	return 0;
}
/**
 * i40e_setup_hw_channel - setup new channel
 * @pf: ptr to PF device
 * @vsi: the VSI being setup
 * @ch: ptr to channel structure
 * @uplink_seid: underlying HW switching element (VEB) ID
 * @type: type of channel to be created (VMDq2/VF)
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and configures TX rings accordingly
 **/
static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
					struct i40e_vsi *vsi,
					struct i40e_channel *ch,
					u16 uplink_seid, u8 type)
{
	int ret;

	ch->initialized = false;
	ch->base_queue = vsi->next_base_queue;
	ch->type = type;

	/* Proceed with creation of channel (VMDq2) VSI */
	ret = i40e_add_channel(pf, uplink_seid, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to add_channel using uplink_seid %u\n",
			 uplink_seid);
		return ret;
	}

	/* Mark the successful creation of channel */
	ch->initialized = true;

	/* Reconfigure TX queues using QTX_CTL register */
	ret = i40e_channel_config_tx_ring(pf, vsi, ch);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to configure TX rings for channel %u\n",
			 ch->seid);
		return ret;
	}

	/* update 'next_base_queue' */
	vsi->next_base_queue = vsi->next_base_queue + ch->num_queue_pairs;
	dev_dbg(&pf->pdev->dev,
		"Added channel: vsi_seid %u, vsi_number %u, stat_counter_idx %u, num_queue_pairs %u, pf->next_base_queue %d\n",
		ch->seid, ch->vsi_number, ch->stat_counter_idx,
		ch->num_queue_pairs,
		vsi->next_base_queue);
	return ret;
}
/**
 * i40e_setup_channel - setup new channel using uplink element
 * @pf: ptr to PF device
 * @vsi: the VSI to set up the channel on
 * @ch: ptr to channel structure
 *
 * Setup new channel (VSI) based on specified type (VMDq2/VF)
 * and uplink switching element (uplink_seid)
 **/
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
			       struct i40e_channel *ch)
{
	u8 vsi_type;
	u16 seid;
	int ret;

	if (vsi->type == I40E_VSI_MAIN) {
		vsi_type = I40E_VSI_VMDQ2;
	} else {
		dev_err(&pf->pdev->dev, "unsupported parent vsi type(%d)\n",
			vsi->type);
		return false;
	}

	/* underlying switching element */
	seid = pf->vsi[pf->lan_vsi]->uplink_seid;

	/* create channel (VSI), configure TX rings */
	ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
	if (ret) {
		dev_err(&pf->pdev->dev, "failed to setup hw_channel\n");
		return false;
	}

	return ch->initialized ? true : false;
}
/**
 * i40e_validate_and_set_switch_mode - sets up switch mode correctly
 * @vsi: ptr to VSI which has PF backing
 *
 * Sets up switch mode correctly if it needs to be changed and perform
 * what are allowed modes.
 **/
static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
{
	u8 mode;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_dev_capabilities);
	if (ret)
		return -EINVAL;

	if (hw->dev_caps.switch_mode) {
		/* if switch mode is set, support mode2 (non-tunneled for
		 * cloud filter) for now
		 */
		u32 switch_mode = hw->dev_caps.switch_mode &
				  I40E_SWITCH_MODE_MASK;
		if (switch_mode >= I40E_CLOUD_FILTER_MODE1) {
			if (switch_mode == I40E_CLOUD_FILTER_MODE2)
				return 0;
			dev_err(&pf->pdev->dev,
				"Invalid switch_mode (%d), only non-tunneled mode for cloud filter is supported\n",
				hw->dev_caps.switch_mode);
			return -EINVAL;
		}
	}

	/* Set Bit 7 to be valid */
	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;

	/* Set L4type for TCP support */
	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;

	/* Set cloud filter mode */
	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;

	/* Prep mode field for set_switch_config */
	ret = i40e_aq_set_switch_config(hw, pf->last_sw_conf_flags,
					pf->last_sw_conf_valid_flags,
					mode, NULL);
	if (ret && hw->aq.asq_last_status != I40E_AQ_RC_ESRCH)
		dev_err(&pf->pdev->dev,
			"couldn't set switch config bits, err %s aq_err %s\n",
			i40e_stat_str(hw, ret),
			i40e_aq_str(hw,
				    hw->aq.asq_last_status));

	return ret;
}
/**
 * i40e_create_queue_channel - function to create channel
 * @vsi: VSI to be configured
 * @ch: ptr to channel (it contains channel specific params)
 *
 * This function creates channel (VSI) using num_queues specified by user,
 * reconfigs RSS if needed.
 **/
int i40e_create_queue_channel(struct i40e_vsi *vsi,
			      struct i40e_channel *ch)
{
	struct i40e_pf *pf = vsi->back;
	bool reconfig_rss;
	int err;

	if (!ch)
		return -EINVAL;

	if (!ch->num_queue_pairs) {
		dev_err(&pf->pdev->dev, "Invalid num_queues requested: %d\n",
			ch->num_queue_pairs);
		return -EINVAL;
	}

	/* validate user requested num_queues for channel */
	err = i40e_validate_num_queues(pf, ch->num_queue_pairs, vsi,
				       &reconfig_rss);
	if (err) {
		dev_info(&pf->pdev->dev, "Failed to validate num_queues (%d)\n",
			 ch->num_queue_pairs);
		return -EINVAL;
	}

	/* By default we are in VEPA mode, if this is the first VF/VMDq
	 * VSI to be added switch to VEB mode.
	 */
	if ((!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) ||
	    (!i40e_is_any_channel(vsi))) {
		if (!is_power_of_2(vsi->tc_config.tc_info[0].qcount)) {
			dev_dbg(&pf->pdev->dev,
				"Failed to create channel. Override queues (%u) not power of 2\n",
				vsi->tc_config.tc_info[0].qcount);
			return -EINVAL;
		}

		if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;

			if (vsi->type == I40E_VSI_MAIN) {
				if (pf->flags & I40E_FLAG_TC_MQPRIO)
					i40e_do_reset(pf, I40E_PF_RESET_FLAG,
						      true);
				else
					i40e_do_reset_safe(pf,
							   I40E_PF_RESET_FLAG);
			}
		}
		/* now onwards for main VSI, number of queues will be value
		 * of TC0's queue count
		 */
	}

	/* By this time, vsi->cnt_q_avail shall be set to non-zero and
	 * it should be more than num_queues
	 */
	if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_queue_pairs) {
		dev_dbg(&pf->pdev->dev,
			"Error: cnt_q_avail (%u) less than num_queues %d\n",
			vsi->cnt_q_avail, ch->num_queue_pairs);
		return -EINVAL;
	}

	/* reconfig_rss only if vsi type is MAIN_VSI */
	if (reconfig_rss && (vsi->type == I40E_VSI_MAIN)) {
		err = i40e_vsi_reconfig_rss(vsi, ch->num_queue_pairs);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "Error: unable to reconfig rss for num_queues (%u)\n",
				 ch->num_queue_pairs);
			return -EINVAL;
		}
	}

	if (!i40e_setup_channel(pf, vsi, ch)) {
		dev_info(&pf->pdev->dev, "Failed to setup channel\n");
		return -EINVAL;
	}

	dev_info(&pf->pdev->dev,
		 "Setup channel (id:%u) utilizing num_queues %d\n",
		 ch->seid, ch->num_queue_pairs);

	/* configure VSI for BW limit */
	if (ch->max_tx_rate) {
		u64 credits = ch->max_tx_rate;

		if (i40e_set_bw_limit(vsi, ch->seid, ch->max_tx_rate))
			return -EINVAL;

		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&pf->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			ch->max_tx_rate,
			credits,
			ch->seid);
	}

	/* in case of VF, this will be main SRIOV VSI */
	ch->parent_vsi = vsi;

	/* and update main_vsi's count for queue_available to use */
	vsi->cnt_q_avail -= ch->num_queue_pairs;

	return 0;
}
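
/* Illustrative caller sketch (an assumption for exposition, not part of the
 * driver): a minimal channel request against the function above. The real
 * caller, i40e_configure_queue_channels() below, also fills base_queue and
 * max_tx_rate from the TC configuration; "4" is an arbitrary queue count.
 */
static int __maybe_unused i40e_example_request_channel(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	int ret;

	ch = kzalloc(sizeof(*ch), GFP_KERNEL);
	if (!ch)
		return -ENOMEM;

	INIT_LIST_HEAD(&ch->list);
	ch->num_queue_pairs = 4;	/* hypothetical example value */
	list_add_tail(&ch->list, &vsi->ch_list);

	ret = i40e_create_queue_channel(vsi, ch);
	if (ret)
		/* on failure, tear down whatever was queued on ch_list */
		i40e_remove_queue_channels(vsi);
	return ret;
}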
/**
 * i40e_configure_queue_channels - Add queue channel for the given TCs
 * @vsi: VSI to be configured
 *
 * Configures queue channel mapping to the given TCs
 **/
static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch;
	u64 max_rate = 0;
	int ret = 0, i;

	/* Create app vsi with the TCs. Main VSI with TC0 is already set up */
	vsi->tc_seid_map[0] = vsi->seid;
	for (i = 1; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (vsi->tc_config.enabled_tc & BIT(i)) {
			ch = kzalloc(sizeof(*ch), GFP_KERNEL);
			if (!ch) {
				ret = -ENOMEM;
				goto err_free;
			}

			INIT_LIST_HEAD(&ch->list);
			ch->num_queue_pairs =
				vsi->tc_config.tc_info[i].qcount;
			ch->base_queue =
				vsi->tc_config.tc_info[i].qoffset;

			/* Bandwidth limit through tc interface is in bytes/s,
			 * change to Mbit/s
			 */
			max_rate = vsi->mqprio_qopt.max_rate[i];
			do_div(max_rate, I40E_BW_MBPS_DIVISOR);
			ch->max_tx_rate = max_rate;

			list_add_tail(&ch->list, &vsi->ch_list);

			ret = i40e_create_queue_channel(vsi, ch);
			if (ret) {
				dev_err(&vsi->back->pdev->dev,
					"Failed creating queue channel with TC%d: queues %d\n",
					i, ch->num_queue_pairs);
				goto err_free;
			}
			vsi->tc_seid_map[i] = ch->seid;
		}
	}
	return ret;

err_free:
	i40e_remove_queue_channels(vsi);
	return ret;
}
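
/* A minimal sketch (an assumption, not driver code) of the two-step rate
 * conversion used above and in i40e_create_queue_channel(): tc hands the
 * driver a byte/s rate, which is scaled to Mbit/s and then to the 50 Mbps
 * credit units that i40e_set_bw_limit() programs into the firmware.
 */
static inline u64 __maybe_unused i40e_example_rate_to_credits(u64 rate_bytes)
{
	do_div(rate_bytes, I40E_BW_MBPS_DIVISOR);	/* bytes/s -> Mbit/s */
	do_div(rate_bytes, I40E_BW_CREDIT_DIVISOR);	/* Mbit/s -> credits */
	return rate_bytes;
}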
/**
 * i40e_veb_config_tc - Configure TCs for given VEB
 * @veb: given VEB
 * @enabled_tc: TC bitmap
 *
 * Configures given TC bitmap for VEB (switching) element
 **/
int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
{
	struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
	struct i40e_pf *pf = veb->pf;
	int ret = 0;
	int i;

	/* No TCs or already enabled TCs just return */
	if (!enabled_tc || veb->enabled_tc == enabled_tc)
		return ret;

	bw_data.tc_valid_bits = enabled_tc;
	/* bw_data.absolute_credits is not set (relative) */

	/* Enable ETS TCs with equal BW Share for now */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT(i))
			bw_data.tc_bw_share_credits[i] = 1;
	}

	ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
						   &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VEB bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the BW information */
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed getting veb bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return ret;
}
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* - Enable all TCs for the LAN VSI
		 * - For all others keep them at TC0 for now
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = I40E_DEFAULT_TRAFFIC_CLASS;

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
/**
 * i40e_resume_port_tx - Resume port Tx
 * @pf: PF struct
 *
 * Resume a port's Tx and issue a PF reset in case of failure to
 * resume.
 **/
static int i40e_resume_port_tx(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int ret;

	ret = i40e_aq_resume_port_tx(hw, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Resume Port Tx failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	}

	return ret;
}
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
	 * Also do not enable DCBx if FW LLDP agent is disabled
	 */
	if ((pf->hw_features & I40E_HW_NO_DCB_SUPPORT) ||
	    (pf->flags & I40E_FLAG_DISABLE_FW_LLDP))
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");
		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC
			 * or explicitly disable if only one TC
			 */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else if (pf->hw.aq.asq_last_status == I40E_AQ_RC_EPERM) {
		dev_info(&pf->pdev->dev, "FW LLDP disabled for this PF.\n");
		pf->flags |= I40E_FLAG_DISABLE_FW_LLDP;
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
#endif /* CONFIG_I40E_DCB */
#define SPEED_SIZE 14

/**
 * i40e_print_link_message - print link up or down
 * @vsi: the VSI for which link needs a message
 * @isup: true if link is up, false otherwise
 */
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
	enum i40e_aq_link_speed new_speed;
	struct i40e_pf *pf = vsi->back;
	char *speed = "Unknown";
	char *fc = "Unknown";
	char *fec = "";
	char *req_fec = "";
	char *an = "";

	if (isup)
		new_speed = pf->hw.phy.link_info.link_speed;
	else
		new_speed = I40E_LINK_SPEED_UNKNOWN;

	if ((vsi->current_isup == isup) && (vsi->current_speed == new_speed))
		return;
	vsi->current_isup = isup;
	vsi->current_speed = new_speed;
	if (!isup) {
		netdev_info(vsi->netdev, "NIC Link is Down\n");
		return;
	}

	/* Warn user if link speed on NPAR enabled partition is not at
	 * least 10GB
	 */
	if (pf->hw.func_caps.npar_enable &&
	    (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
	     pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
		netdev_warn(vsi->netdev,
			    "The partition detected link speed that is less than 10Gbps\n");

	switch (pf->hw.phy.link_info.link_speed) {
	case I40E_LINK_SPEED_40GB:
		speed = "40 G";
		break;
	case I40E_LINK_SPEED_20GB:
		speed = "20 G";
		break;
	case I40E_LINK_SPEED_25GB:
		speed = "25 G";
		break;
	case I40E_LINK_SPEED_10GB:
		speed = "10 G";
		break;
	case I40E_LINK_SPEED_1GB:
		speed = "1000 M";
		break;
	case I40E_LINK_SPEED_100MB:
		speed = "100 M";
		break;
	default:
		break;
	}

	switch (pf->hw.fc.current_mode) {
	case I40E_FC_FULL:
		fc = "RX/TX";
		break;
	case I40E_FC_TX_PAUSE:
		fc = "TX";
		break;
	case I40E_FC_RX_PAUSE:
		fc = "RX";
		break;
	default:
		fc = "None";
		break;
	}

	if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
		req_fec = ", Requested FEC: None";
		fec = ", FEC: None";
		an = ", Autoneg: False";

		if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
			an = ", Autoneg: True";

		if (pf->hw.phy.link_info.fec_info &
		    I40E_AQ_CONFIG_FEC_KR_ENA)
			fec = ", FEC: CL74 FC-FEC/BASE-R";
		else if (pf->hw.phy.link_info.fec_info &
			 I40E_AQ_CONFIG_FEC_RS_ENA)
			fec = ", FEC: CL108 RS-FEC";

		/* 'CL108 RS-FEC' should be displayed when RS is requested, or
		 * both RS and FC are requested
		 */
		if (vsi->back->hw.phy.link_info.req_fec_info &
		    (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
			if (vsi->back->hw.phy.link_info.req_fec_info &
			    I40E_AQ_REQUEST_FEC_RS)
				req_fec = ", Requested FEC: CL108 RS-FEC";
			else
				req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
		}
	}

	netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
		    speed, req_fec, fec, an, fc);
}
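
/* Example of the resulting log line, with illustrative values for a 25G
 * link that negotiated RS-FEC:
 *
 *   eth0: NIC Link is Up, 25 Gbps Full Duplex, Requested FEC: CL108 RS-FEC,
 *   FEC: CL108 RS-FEC, Autoneg: True, Flow Control: None
 */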
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_start_rings(vsi);
	if (err)
		return err;

	clear_bit(__I40E_VSI_DOWN, vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		i40e_fdir_filter_restore(vsi);
	}

	/* On the next run of the service_task, notify any clients of the new
	 * opened netdev
	 */
	set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
	i40e_service_event_schedule(pf);

	return 0;
}
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	WARN_ON(in_interrupt());
	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err;

	err = i40e_vsi_configure(vsi);
	if (!err)
		err = i40e_up_complete(vsi);

	return err;
}
/**
 * i40e_force_link_state - Force the link status
 * @pf: board private structure
 * @is_up: whether the link state should be forced up or down
 **/
static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config = {0};
	struct i40e_hw *hw = &pf->hw;
	i40e_status err;
	u64 mask;
	u8 speed;

	/* Card might've been put in an unstable state by other drivers
	 * and applications, which causes incorrect speed values being
	 * set on startup. In order to clear speed registers, we call
	 * get_phy_capabilities twice, once to get initial state of
	 * available speeds, and once to get current PHY config.
	 */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities,
					   NULL);
	if (err) {
		dev_err(&pf->pdev->dev,
			"failed to get phy cap., ret = %s last_status = %s\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
		return err;
	}
	speed = abilities.link_speed;

	/* Get the current phy config */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					   NULL);
	if (err) {
		dev_err(&pf->pdev->dev,
			"failed to get phy cap., ret = %s last_status = %s\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
		return err;
	}

	/* If link needs to go up, but was not forced to go down,
	 * and its speed values are OK, no need for a flap
	 */
	if (is_up && abilities.phy_type != 0 && abilities.link_speed != 0)
		return I40E_SUCCESS;

	/* To force link we need to set bits for all supported PHY types,
	 * but there are now more than 32, so we need to split the bitmap
	 * across two fields.
	 */
	mask = I40E_PHY_TYPES_BITMASK;
	config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
	config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
	/* Copy the old settings, except of phy_type */
	config.abilities = abilities.abilities;
	if (abilities.link_speed != 0)
		config.link_speed = abilities.link_speed;
	else
		config.link_speed = speed;
	config.eee_capability = abilities.eee_capability;
	config.eeer = abilities.eeer_val;
	config.low_power_ctrl = abilities.d3_lpan;
	config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
			    I40E_AQ_PHY_FEC_CONFIG_MASK;
	err = i40e_aq_set_phy_config(hw, &config, NULL);

	if (err) {
		dev_err(&pf->pdev->dev,
			"set phy config ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return err;
	}

	/* Update the link info */
	err = i40e_update_link_info(hw);
	if (err) {
		/* Wait a little bit (on 40G cards it sometimes takes a really
		 * long time for link to come back from the atomic reset)
		 * and try once more
		 */
		msleep(1000);
		i40e_update_link_info(hw);
	}

	i40e_aq_set_link_restart_an(hw, true, NULL);

	return I40E_SUCCESS;
}
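
/* Worked example for the bitmap split above (illustrative values): if
 * I40E_PHY_TYPES_BITMASK covered 40 PHY types, i.e. mask = 0xffffffffff,
 * then config.phy_type receives the low word 0xffffffff and
 * config.phy_type_ext receives (mask >> 32) & 0xff = 0xff.
 */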
/**
 * i40e_down - Shutdown the connection processing
 * @vsi: the VSI being stopped
 **/
void i40e_down(struct i40e_vsi *vsi)
{
	int i;

	/* It is assumed that the caller of this function
	 * sets the vsi->state __I40E_VSI_DOWN bit.
	 */
	if (vsi->netdev) {
		netif_carrier_off(vsi->netdev);
		netif_tx_disable(vsi->netdev);
	}
	i40e_vsi_disable_irq(vsi);
	i40e_vsi_stop_rings(vsi);
	if (vsi->type == I40E_VSI_MAIN &&
	    vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
		i40e_force_link_state(vsi->back, false);
	i40e_napi_disable_all(vsi);

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		i40e_clean_tx_ring(vsi->tx_rings[i]);
		if (i40e_enabled_xdp_vsi(vsi))
			i40e_clean_tx_ring(vsi->xdp_rings[i]);
		i40e_clean_rx_ring(vsi->rx_rings[i]);
	}
}
/**
 * i40e_validate_mqprio_qopt - validate queue mapping info
 * @vsi: the VSI being configured
 * @mqprio_qopt: queue parameters
 **/
static int i40e_validate_mqprio_qopt(struct i40e_vsi *vsi,
				     struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u64 sum_max_rate = 0;
	u64 max_rate = 0;
	int i;

	if (mqprio_qopt->qopt.offset[0] != 0 ||
	    mqprio_qopt->qopt.num_tc < 1 ||
	    mqprio_qopt->qopt.num_tc > I40E_MAX_TRAFFIC_CLASS)
		return -EINVAL;
	for (i = 0; ; i++) {
		if (!mqprio_qopt->qopt.count[i])
			return -EINVAL;
		if (mqprio_qopt->min_rate[i]) {
			dev_err(&vsi->back->pdev->dev,
				"Invalid min tx rate (greater than 0) specified\n");
			return -EINVAL;
		}
		max_rate = mqprio_qopt->max_rate[i];
		do_div(max_rate, I40E_BW_MBPS_DIVISOR);
		sum_max_rate += max_rate;

		if (i >= mqprio_qopt->qopt.num_tc - 1)
			break;
		if (mqprio_qopt->qopt.offset[i + 1] !=
		    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
			return -EINVAL;
	}
	if (vsi->num_queue_pairs <
	    (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) {
		return -EINVAL;
	}
	if (sum_max_rate > i40e_get_link_speed(vsi)) {
		dev_err(&vsi->back->pdev->dev,
			"Invalid max tx rate specified\n");
		return -EINVAL;
	}
	return 0;
}
/**
 * i40e_vsi_set_default_tc_config - set default values for tc configuration
 * @vsi: the VSI being configured
 **/
static void i40e_vsi_set_default_tc_config(struct i40e_vsi *vsi)
{
	u16 qcount;
	int i;

	/* Only TC0 is enabled */
	vsi->tc_config.numtc = 1;
	vsi->tc_config.enabled_tc = 1;
	qcount = min_t(int, vsi->alloc_queue_pairs,
		       i40e_pf_get_max_q_per_tc(vsi->back));
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		/* For the TC that is not enabled set the offset to the default
		 * queue and allocate one queue for the given TC.
		 */
		vsi->tc_config.tc_info[i].qoffset = 0;
		if (i == 0)
			vsi->tc_config.tc_info[i].qcount = qcount;
		else
			vsi->tc_config.tc_info[i].qcount = 1;
		vsi->tc_config.tc_info[i].netdev_tc = 0;
	}
}
/**
 * i40e_setup_tc - configure multiple traffic classes
 * @netdev: net device to configure
 * @type_data: tc offload data
 **/
static int i40e_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc = 0, num_tc, hw;
	bool need_reset = false;
	int ret = -EINVAL;
	u16 mode;
	int i;

	num_tc = mqprio_qopt->qopt.num_tc;
	hw = mqprio_qopt->qopt.hw;
	mode = mqprio_qopt->mode;
	if (!hw) {
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
		goto config_tc;
	}

	/* Check if MFP enabled */
	if (pf->flags & I40E_FLAG_MFP_ENABLED) {
		netdev_info(netdev,
			    "Configuring TC not supported in MFP mode\n");
		return ret;
	}
	switch (mode) {
	case TC_MQPRIO_MODE_DCB:
		pf->flags &= ~I40E_FLAG_TC_MQPRIO;

		/* Check if DCB enabled to continue */
		if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
			netdev_info(netdev,
				    "DCB is not enabled for adapter\n");
			return ret;
		}

		/* Check whether tc count is within enabled limit */
		if (num_tc > i40e_pf_get_num_tc(pf)) {
			netdev_info(netdev,
				    "TC count greater than enabled on link for adapter\n");
			return ret;
		}
		break;
	case TC_MQPRIO_MODE_CHANNEL:
		if (pf->flags & I40E_FLAG_DCB_ENABLED) {
			netdev_info(netdev,
				    "Full offload of TC Mqprio options is not supported when DCB is enabled\n");
			return ret;
		}
		if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
			return ret;
		ret = i40e_validate_mqprio_qopt(vsi, mqprio_qopt);
		if (ret)
			return ret;
		memcpy(&vsi->mqprio_qopt, mqprio_qopt,
		       sizeof(*mqprio_qopt));
		pf->flags |= I40E_FLAG_TC_MQPRIO;
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;
		break;
	default:
		return ret;
	}

config_tc:
	/* Generate TC map for number of tc requested */
	for (i = 0; i < num_tc; i++)
		enabled_tc |= BIT(i);

	/* Requesting same TC configuration as already enabled */
	if (enabled_tc == vsi->tc_config.enabled_tc &&
	    mode != TC_MQPRIO_MODE_CHANNEL)
		return 0;

	/* Quiesce VSI queues */
	i40e_quiesce_vsi(vsi);

	if (!hw && !(pf->flags & I40E_FLAG_TC_MQPRIO))
		i40e_remove_queue_channels(vsi);

	/* Configure VSI for enabled TCs */
	ret = i40e_vsi_config_tc(vsi, enabled_tc);
	if (ret) {
		netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
			    vsi->seid);
		need_reset = true;
		goto exit;
	}

	if (pf->flags & I40E_FLAG_TC_MQPRIO) {
		if (vsi->mqprio_qopt.max_rate[0]) {
			u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];

			do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
			ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
			if (!ret) {
				u64 credits = max_tx_rate;

				do_div(credits, I40E_BW_CREDIT_DIVISOR);
				dev_dbg(&vsi->back->pdev->dev,
					"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
					max_tx_rate,
					credits,
					vsi->seid);
			} else {
				need_reset = true;
				goto exit;
			}
		}
		ret = i40e_configure_queue_channels(vsi);
		if (ret) {
			netdev_info(netdev,
				    "Failed configuring queue channels\n");
			need_reset = true;
			goto exit;
		}
	}

exit:
	/* Reset the configuration data to defaults, only TC0 is enabled */
	if (need_reset) {
		i40e_vsi_set_default_tc_config(vsi);
		need_reset = false;
	}

	/* Unquiesce VSI */
	i40e_unquiesce_vsi(vsi);
	return ret;
}
/**
 * i40e_set_cld_element - sets cloud filter element data
 * @filter: cloud filter rule
 * @cld: ptr to cloud filter element data
 *
 * This is helper function to copy data into cloud filter element
 **/
static inline void
i40e_set_cld_element(struct i40e_cloud_filter *filter,
		     struct i40e_aqc_cloud_filters_element_data *cld)
{
	int i, j;
	u32 ipa;

	memset(cld, 0, sizeof(*cld));
	ether_addr_copy(cld->outer_mac, filter->dst_mac);
	ether_addr_copy(cld->inner_mac, filter->src_mac);

	if (filter->n_proto != ETH_P_IP && filter->n_proto != ETH_P_IPV6)
		return;

	if (filter->n_proto == ETH_P_IPV6) {
#define IPV6_MAX_INDEX	(ARRAY_SIZE(filter->dst_ipv6) - 1)
		for (i = 0, j = 0; i < ARRAY_SIZE(filter->dst_ipv6);
		     i++, j += 2) {
			ipa = be32_to_cpu(filter->dst_ipv6[IPV6_MAX_INDEX - i]);
			ipa = cpu_to_le32(ipa);
			memcpy(&cld->ipaddr.raw_v6.data[j], &ipa, sizeof(ipa));
		}
	} else {
		ipa = be32_to_cpu(filter->dst_ipv4);
		memcpy(&cld->ipaddr.v4.data, &ipa, sizeof(ipa));
	}

	cld->inner_vlan = cpu_to_le16(ntohs(filter->vlan_id));

	/* tenant_id is not supported by FW now, once the support is enabled
	 * fill the cld->tenant_id with cpu_to_le32(filter->tenant_id)
	 */
	if (filter->tenant_id)
		return;
}
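
/* Worked example for the IPv6 copy above (illustrative address): for
 * dst 2001:db8::1, filter->dst_ipv6[] holds the big-endian words
 * { 2001:0db8, 0000:0000, 0000:0000, 0000:0001 }; the loop walks them from
 * the last word to the first, byte-swapping each into the little-endian
 * layout the admin queue buffer expects.
 */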
/**
 * i40e_add_del_cloud_filter - Add/del cloud filter
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec.
 * Returns 0 if the filter was successfully added.
 **/
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
			      struct i40e_cloud_filter *filter, bool add)
{
	struct i40e_aqc_cloud_filters_element_data cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;
	static const u16 flag_table[128] = {
		[I40E_CLOUD_FILTER_FLAGS_OMAC]  =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC]  =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN]  =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_OMAC_TEN_ID_IMAC] =
			I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC,
		[I40E_CLOUD_FILTER_FLAGS_IMAC_IVLAN_TEN_ID] =
			I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID,
		[I40E_CLOUD_FILTER_FLAGS_IIP] =
			I40E_AQC_ADD_CLOUD_FILTER_IIP,
	};

	if (filter->flags >= ARRAY_SIZE(flag_table))
		return I40E_ERR_CONFIG;

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter);

	if (filter->tunnel_type != I40E_CLOUD_TNL_TYPE_NONE)
		cld_filter.flags = cpu_to_le16(filter->tunnel_type <<
					     I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT);

	if (filter->n_proto == ETH_P_IPV6)
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
	else
		cld_filter.flags |= cpu_to_le16(flag_table[filter->flags] |
						I40E_AQC_ADD_CLOUD_FLAGS_IPV4);

	if (add)
		ret = i40e_aq_add_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	else
		ret = i40e_aq_rem_cloud_filters(&pf->hw, filter->seid,
						&cld_filter, 1);
	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter using l4 port %u, err %d aq_err %d\n",
			add ? "add" : "delete", filter->dst_port, ret,
			pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d\n",
			 add ? "Added" : "Deleted", filter->seid);
	return ret;
}
/**
 * i40e_add_del_cloud_filter_big_buf - Add/del cloud filter using big_buf
 * @vsi: pointer to VSI
 * @filter: cloud filter rule
 * @add: if true, add, if false, delete
 *
 * Add or delete a cloud filter for a specific flow spec using big buffer.
 * Returns 0 if the filter was successfully added.
 **/
int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
				      struct i40e_cloud_filter *filter,
				      bool add)
{
	struct i40e_aqc_cloud_filters_element_bb cld_filter;
	struct i40e_pf *pf = vsi->back;
	int ret;

	/* Both (src/dst) valid mac_addr are not supported */
	if ((is_valid_ether_addr(filter->dst_mac) &&
	     is_valid_ether_addr(filter->src_mac)) ||
	    (is_multicast_ether_addr(filter->dst_mac) &&
	     is_multicast_ether_addr(filter->src_mac)))
		return -EOPNOTSUPP;

	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
	 * ports are not supported via big buffer now.
	 */
	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
		return -EOPNOTSUPP;

	/* adding filter using src_port/src_ip is not supported at this stage */
	if (filter->src_port || filter->src_ipv4 ||
	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
		return -EOPNOTSUPP;

	/* copy element needed to add cloud filter from filter */
	i40e_set_cld_element(filter, &cld_filter.element);

	if (is_valid_ether_addr(filter->dst_mac) ||
	    is_valid_ether_addr(filter->src_mac) ||
	    is_multicast_ether_addr(filter->dst_mac) ||
	    is_multicast_ether_addr(filter->src_mac)) {
		/* MAC + IP : unsupported mode */
		if (filter->dst_ipv4)
			return -EOPNOTSUPP;

		/* since we validated that L4 port must be valid before
		 * we get here, start with respective "flags" value
		 * and update if vlan is present or not
		 */
		cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT);

		if (filter->vlan_id) {
			cld_filter.element.flags =
			cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
		}

	} else if (filter->dst_ipv4 ||
		   !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
		cld_filter.element.flags =
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
		if (filter->n_proto == ETH_P_IPV6)
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV6);
		else
			cld_filter.element.flags |=
				cpu_to_le16(I40E_AQC_ADD_CLOUD_FLAGS_IPV4);
	} else {
		dev_err(&pf->pdev->dev,
			"either mac or ip has to be valid for cloud filter\n");
		return -EINVAL;
	}

	/* Now copy L4 port in Byte 6..7 in general fields */
	cld_filter.general_fields[I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0] =
						be16_to_cpu(filter->dst_port);

	if (add) {
		/* Validate current device switch mode, change if necessary */
		ret = i40e_validate_and_set_switch_mode(vsi);
		if (ret) {
			dev_err(&pf->pdev->dev,
				"failed to set switch mode, ret %d\n",
				ret);
			return ret;
		}

		ret = i40e_aq_add_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	} else {
		ret = i40e_aq_rem_cloud_filters_bb(&pf->hw, filter->seid,
						   &cld_filter, 1);
	}

	if (ret)
		dev_dbg(&pf->pdev->dev,
			"Failed to %s cloud filter(big buffer) err %d aq_err %d\n",
			add ? "add" : "delete", ret, pf->hw.aq.asq_last_status);
	else
		dev_info(&pf->pdev->dev,
			 "%s cloud filter for VSI: %d, L4 port: %d\n",
			 add ? "add" : "delete", filter->seid,
			 ntohs(filter->dst_port));
	return ret;
}
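
/* Usage sketch (hypothetical interface and addresses): a tc-flower rule
 * with a TCP destination port takes the big-buffer path above, since
 * filter->dst_port is then non-zero:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 protocol ip parent ffff: \
 *	flower dst_ip 192.168.1.10 ip_proto tcp dst_port 80 \
 *	skip_sw hw_tc 1
 */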
/**
 * i40e_parse_cls_flower - Parse tc flower filters provided by kernel
 * @vsi: Pointer to VSI
 * @f: Pointer to struct tc_cls_flower_offload
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
				 struct tc_cls_flower_offload *f,
				 struct i40e_cloud_filter *filter)
{
	u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
	struct i40e_pf *pf = vsi->back;
	u8 field_flags = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
		dev_err(&pf->pdev->dev, "Unsupported key used: 0x%x\n",
			f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);

		if (mask->keyid != 0)
			field_flags |= I40E_CLOUD_FIELD_TEN_ID;

		filter->tenant_id = be32_to_cpu(key->keyid);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);

		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		filter->n_proto = n_proto_key & n_proto_mask;
		filter->ip_proto = key->ip_proto;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		/* use is_broadcast and is_zero to check for all 0xf or 0 */
		if (!is_zero_ether_addr(mask->dst)) {
			if (is_broadcast_ether_addr(mask->dst)) {
				field_flags |= I40E_CLOUD_FIELD_OMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether dest mask %pM\n",
					mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (is_broadcast_ether_addr(mask->src)) {
				field_flags |= I40E_CLOUD_FIELD_IMAC;
			} else {
				dev_err(&pf->pdev->dev, "Bad ether src mask %pM\n",
					mask->src);
				return I40E_ERR_CONFIG;
			}
		}
		ether_addr_copy(filter->dst_mac, key->dst);
		ether_addr_copy(filter->src_mac, key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		if (mask->vlan_id) {
			if (mask->vlan_id == VLAN_VID_MASK) {
				field_flags |= I40E_CLOUD_FIELD_IVLAN;
			} else {
				dev_err(&pf->pdev->dev, "Bad vlan mask 0x%04x\n",
					mask->vlan_id);
				return I40E_ERR_CONFIG;
			}
		}

		filter->vlan_id = cpu_to_be16(key->vlan_id);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		addr_type = key->addr_type;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		if (mask->dst) {
			if (mask->dst == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad ip dst mask %pI4b\n",
					&mask->dst);
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->src) {
			if (mask->src == cpu_to_be32(0xffffffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad ip src mask %pI4b\n",
					&mask->src);
				return I40E_ERR_CONFIG;
			}
		}

		if (field_flags & I40E_CLOUD_FIELD_TEN_ID) {
			dev_err(&pf->pdev->dev, "Tenant id not allowed for ip filter\n");
			return I40E_ERR_CONFIG;
		}
		filter->dst_ipv4 = key->dst;
		filter->src_ipv4 = key->src;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		/* src and dest IPV6 address should not be LOOPBACK
		 * (0:0:0:0:0:0:0:1), which can be represented as ::1
		 */
		if (ipv6_addr_loopback(&key->dst) ||
		    ipv6_addr_loopback(&key->src)) {
			dev_err(&pf->pdev->dev,
				"Bad ipv6, addr is LOOPBACK\n");
			return I40E_ERR_CONFIG;
		}
		if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
			field_flags |= I40E_CLOUD_FIELD_IIP;

		memcpy(&filter->src_ipv6, &key->src.s6_addr32,
		       sizeof(filter->src_ipv6));
		memcpy(&filter->dst_ipv6, &key->dst.s6_addr32,
		       sizeof(filter->dst_ipv6));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);

		if (mask->src) {
			if (mask->src == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad src port mask 0x%04x\n",
					be16_to_cpu(mask->src));
				return I40E_ERR_CONFIG;
			}
		}

		if (mask->dst) {
			if (mask->dst == cpu_to_be16(0xffff)) {
				field_flags |= I40E_CLOUD_FIELD_IIP;
			} else {
				dev_err(&pf->pdev->dev, "Bad dst port mask 0x%04x\n",
					be16_to_cpu(mask->dst));
				return I40E_ERR_CONFIG;
			}
		}

		filter->dst_port = key->dst;
		filter->src_port = key->src;

		switch (filter->ip_proto) {
		case IPPROTO_TCP:
		case IPPROTO_UDP:
			break;
		default:
			dev_err(&pf->pdev->dev,
				"Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}
	filter->flags = field_flags;
	return 0;
}
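
/* Note on the mask checks above (illustrative): only exact matches are
 * accepted, so "flower dst_ip 192.168.1.10" (implicitly /32) sets
 * I40E_CLOUD_FIELD_IIP, while a partial mask such as "dst_ip 192.168.1.0/24"
 * fails the cpu_to_be32(0xffffffff) comparison and is rejected with
 * "Bad ip dst mask".
 */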
/**
 * i40e_handle_tclass - Forward to a traffic class on the device
 * @vsi: Pointer to VSI
 * @tc: traffic class index on the device
 * @filter: Pointer to cloud filter structure
 *
 **/
static int i40e_handle_tclass(struct i40e_vsi *vsi, u32 tc,
			      struct i40e_cloud_filter *filter)
{
	struct i40e_channel *ch, *ch_tmp;

	/* direct to a traffic class on the same device */
	if (tc == 0) {
		filter->seid = vsi->seid;
		return 0;
	} else if (vsi->tc_config.enabled_tc & BIT(tc)) {
		if (!filter->dst_port) {
			dev_err(&vsi->back->pdev->dev,
				"Specify destination port to direct to traffic class that is not default\n");
			return -EINVAL;
		}
		if (list_empty(&vsi->ch_list))
			return -EINVAL;
		list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list,
					 list) {
			if (ch->seid == vsi->tc_seid_map[tc])
				filter->seid = ch->seid;
		}
		return 0;
	}
	dev_err(&vsi->back->pdev->dev, "TC is not enabled\n");
	return -EINVAL;
}
/**
 * i40e_configure_clsflower - Configure tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_configure_clsflower(struct i40e_vsi *vsi,
				    struct tc_cls_flower_offload *cls_flower)
{
	int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	if (tc < 0) {
		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
		return -EOPNOTSUPP;
	}

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, pf->state))
		return -EBUSY;

	if (pf->fdir_pf_active_filters ||
	    (!hlist_empty(&pf->fdir_filter_list))) {
		dev_err(&vsi->back->pdev->dev,
			"Flow Director Sideband filters exists, turn ntuple off to configure cloud filters\n");
		return -EINVAL;
	}

	if (vsi->back->flags & I40E_FLAG_FD_SB_ENABLED) {
		dev_err(&vsi->back->pdev->dev,
			"Disable Flow Director Sideband, configuring Cloud filters via tc-flower\n");
		vsi->back->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		vsi->back->flags |= I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	filter->cookie = cls_flower->cookie;

	err = i40e_parse_cls_flower(vsi, cls_flower, filter);
	if (err < 0)
		goto err;

	err = i40e_handle_tclass(vsi, tc, filter);
	if (err < 0)
		goto err;

	/* Add cloud filter */
	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, true);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, true);

	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to add cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		goto err;
	}

	/* add filter to the ordered list */
	INIT_HLIST_NODE(&filter->cloud_node);

	hlist_add_head(&filter->cloud_node, &pf->cloud_filter_list);

	pf->num_cloud_filters++;

	return err;
err:
	kfree(filter);
	return err;
}
/**
 * i40e_find_cloud_filter - Find the cloud filter in the list
 * @vsi: Pointer to VSI
 * @cookie: filter specific cookie
 *
 **/
static struct i40e_cloud_filter *i40e_find_cloud_filter(struct i40e_vsi *vsi,
							unsigned long *cookie)
{
	struct i40e_cloud_filter *filter = NULL;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &vsi->back->cloud_filter_list, cloud_node)
		if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
			return filter;
	return NULL;
}
/**
 * i40e_delete_clsflower - Remove tc flower filters
 * @vsi: Pointer to VSI
 * @cls_flower: Pointer to struct tc_cls_flower_offload
 *
 **/
static int i40e_delete_clsflower(struct i40e_vsi *vsi,
				 struct tc_cls_flower_offload *cls_flower)
{
	struct i40e_cloud_filter *filter = NULL;
	struct i40e_pf *pf = vsi->back;
	int err = 0;

	filter = i40e_find_cloud_filter(vsi, &cls_flower->cookie);

	if (!filter)
		return -EINVAL;

	hash_del(&filter->cloud_node);

	if (filter->dst_port)
		err = i40e_add_del_cloud_filter_big_buf(vsi, filter, false);
	else
		err = i40e_add_del_cloud_filter(vsi, filter, false);

	kfree(filter);
	if (err) {
		dev_err(&pf->pdev->dev,
			"Failed to delete cloud filter, err %s\n",
			i40e_stat_str(&pf->hw, err));
		return i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
	}

	pf->num_cloud_filters--;
	if (!pf->num_cloud_filters)
		if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
		    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	return 0;
}
/**
 * i40e_setup_tc_cls_flower - flower classifier offloads
 * @np: net device private structure
 * @cls_flower: offload data
 **/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
				    struct tc_cls_flower_offload *cls_flower)
{
	struct i40e_vsi *vsi = np->vsi;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return i40e_configure_clsflower(vsi, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return i40e_delete_clsflower(vsi, cls_flower);
	case TC_CLSFLOWER_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}
static int i40e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct i40e_netdev_priv *np = cb_priv;

	if (!tc_cls_can_offload_and_chain0(np->vsi->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return i40e_setup_tc_cls_flower(np, type_data);

	default:
		return -EOPNOTSUPP;
	}
}
static int i40e_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, i40e_setup_tc_block_cb,
					     np, np);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, i40e_setup_tc_block_cb, np);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int __i40e_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			   void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return i40e_setup_tc(netdev, type_data);
	case TC_SETUP_BLOCK:
		return i40e_setup_tc_block(netdev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
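
/* For reference: this dispatcher is wired into the stack through the netdev
 * ops table defined later in this file, i.e.
 *
 *	.ndo_setup_tc = __i40e_setup_tc,
 *
 * so TC_SETUP_QDISC_MQPRIO and TC_SETUP_BLOCK requests both funnel through
 * here.
 */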
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	if (i40e_force_link_state(pf, true))
		return -EAGAIN;

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

	udp_tunnel_get_rx_info(netdev);

	return 0;
}
/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Returns 0 on success, negative value on failure
 *
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);
	} else {
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	return err;
}
/**
 * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the Flow Director
 * filters were saved.
 **/
static void i40e_fdir_filter_exit(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	struct i40e_flex_pit *pit_entry, *tmp;
	struct hlist_node *node2;

	hlist_for_each_entry_safe(filter, node2,
				  &pf->fdir_filter_list, fdir_node) {
		hlist_del(&filter->fdir_node);
		kfree(filter);
	}

	list_for_each_entry_safe(pit_entry, tmp, &pf->l3_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l3_flex_pit_list);

	list_for_each_entry_safe(pit_entry, tmp, &pf->l4_flex_pit_list, list) {
		list_del(&pit_entry->list);
		kfree(pit_entry);
	}
	INIT_LIST_HEAD(&pf->l4_flex_pit_list);

	pf->fdir_pf_active_filters = 0;
	pf->fd_tcp4_filter_cnt = 0;
	pf->fd_udp4_filter_cnt = 0;
	pf->fd_sctp4_filter_cnt = 0;
	pf->fd_ip4_filter_cnt = 0;

	/* Reprogram the default input set for TCP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for UDP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_UDP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for SCTP/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_SCTP,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
				I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

	/* Reprogram the default input set for Other/IPv4 */
	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);

	i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
				I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
}
/**
 * i40e_cloud_filter_exit - Cleans up the cloud filters
 * @pf: Pointer to PF
 *
 * This function destroys the hlist where all the cloud filters
 * were saved.
 **/
static void i40e_cloud_filter_exit(struct i40e_pf *pf)
{
	struct i40e_cloud_filter *cfilter;
	struct hlist_node *node;

	hlist_for_each_entry_safe(cfilter, node,
				  &pf->cloud_filter_list, cloud_node) {
		hlist_del(&cfilter->cloud_node);
		kfree(cfilter);
	}
	pf->num_cloud_filters = 0;

	if ((pf->flags & I40E_FLAG_FD_SB_TO_CLOUD_FILTER) &&
	    !(pf->flags & I40E_FLAG_FD_SB_INACTIVE)) {
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->flags &= ~I40E_FLAG_FD_SB_TO_CLOUD_FILTER;
		pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
	}
}
/**
 * i40e_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * this netdev interface is disabled.
 *
 * Returns 0, this is not allowed to fail
 **/
int i40e_close(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	i40e_vsi_close(vsi);

	return 0;
}
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: which reset is requested
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor. Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & I40E_PF_RESET_FLAG) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf, lock_acquired);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_REINIT_REQUESTED,
					       vsi->state))
				i40e_vsi_reinit_locked(pf->vsi[v]);
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_and_clear_bit(__I40E_VSI_DOWN_REQUESTED,
					       vsi->state)) {
				set_bit(__I40E_VSI_DOWN, vsi->state);
				i40e_down(vsi);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
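
/* Caller sketch: code that already holds the rtnl lock requests a PF reset
 * directly, e.g.
 *
 *	i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
 *
 * while userland-triggered paths go through i40e_do_reset_safe() further
 * below, which takes the lock first.
 */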
#ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_need_reconfig - Check if DCB needs reconfig
 * @pf: board private structure
 * @old_cfg: current DCB config
 * @new_cfg: new DCB config
 **/
bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
			    struct i40e_dcbx_config *old_cfg,
			    struct i40e_dcbx_config *new_cfg)
{
	bool need_reconfig = false;

	/* Check if ETS configuration has changed */
	if (memcmp(&new_cfg->etscfg,
		   &old_cfg->etscfg,
		   sizeof(new_cfg->etscfg))) {
		/* If Priority Table has changed reconfig is needed */
		if (memcmp(&new_cfg->etscfg.prioritytable,
			   &old_cfg->etscfg.prioritytable,
			   sizeof(new_cfg->etscfg.prioritytable))) {
			need_reconfig = true;
			dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
		}

		if (memcmp(&new_cfg->etscfg.tcbwtable,
			   &old_cfg->etscfg.tcbwtable,
			   sizeof(new_cfg->etscfg.tcbwtable)))
			dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");

		if (memcmp(&new_cfg->etscfg.tsatable,
			   &old_cfg->etscfg.tsatable,
			   sizeof(new_cfg->etscfg.tsatable)))
			dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
	}

	/* Check if PFC configuration has changed */
	if (memcmp(&new_cfg->pfc,
		   &old_cfg->pfc,
		   sizeof(new_cfg->pfc))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
	}

	/* Check if APP Table has changed */
	if (memcmp(&new_cfg->app,
		   &old_cfg->app,
		   sizeof(new_cfg->app))) {
		need_reconfig = true;
		dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
	}

	dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
	return need_reconfig;
}
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_SUSPENDED, pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's queues to be disabled */
	ret = i40e_pf_wait_queues_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
		set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
		set_bit(__I40E_CLIENT_L2_CHANGE, pf->state);
	}

exit:
	return ret;
}
#endif /* CONFIG_I40E_DCB */
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags, true);
	rtnl_unlock();
}
/**
 * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Handler for LAN Queue Overflow Event generated by the firmware for PF
 * and VFs
 **/
static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
					   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lan_overflow *data =
		(struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
	u32 queue = le32_to_cpu(data->prtdcb_rupto);
	u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vf *vf;
	u16 vf_id;

	dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
		queue, qtx_ctl);

	/* Queue belongs to VF, find the VF and issue VF reset */
	if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
	    >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
		vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
			      >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
		vf_id -= hw->func_caps.vf_base_id;
		vf = &pf->vf[vf_id];
		i40e_vc_notify_vf_reset(vf);
		/* Allow VF to process pending reset notification */
		msleep(20);
		i40e_reset_vf(vf, false);
	}
}
/**
 * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
 * @pf: board private structure
 **/
u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
	return fcnt_prog;
}

/**
 * i40e_get_current_fd_count - Get total FD filters programmed for this PF
 * @pf: board private structure
 **/
u32 i40e_get_current_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
	fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
		    ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
		      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
	return fcnt_prog;
}

/**
 * i40e_get_global_fd_count - Get total FD filters programmed on device
 * @pf: board private structure
 **/
u32 i40e_get_global_fd_count(struct i40e_pf *pf)
{
	u32 val, fcnt_prog;

	val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
	fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
		    ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
		     I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
	return fcnt_prog;
}
/**
 * i40e_reenable_fdir_sb - Restore FDir SB capability
 * @pf: board private structure
 **/
static void i40e_reenable_fdir_sb(struct i40e_pf *pf)
{
	if (test_and_clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state))
		if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}

/**
 * i40e_reenable_fdir_atr - Restore FDir ATR capability
 * @pf: board private structure
 **/
static void i40e_reenable_fdir_atr(struct i40e_pf *pf)
{
	if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) {
		/* ATR uses the same filtering logic as SB rules. It only
		 * functions properly if the input set mask is at the default
		 * settings. It is safe to restore the default input set
		 * because there are no active TCPv4 filter rules.
		 */
		i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_TCP,
					I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
					I40E_L4_SRC_MASK | I40E_L4_DST_MASK);

		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (I40E_DEBUG_FD & pf->hw.debug_mask))
			dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table and there are no conflicting ntuple rules\n");
	}
}
/**
 * i40e_delete_invalid_filter - Delete an invalid FDIR filter
 * @pf: board private structure
 * @filter: FDir filter to remove
 **/
static void i40e_delete_invalid_filter(struct i40e_pf *pf,
				       struct i40e_fdir_filter *filter)
{
	/* Update counters */
	pf->fdir_pf_active_filters--;
	pf->fd_inv = 0;

	switch (filter->flow_type) {
	case TCP_V4_FLOW:
		pf->fd_tcp4_filter_cnt--;
		break;
	case UDP_V4_FLOW:
		pf->fd_udp4_filter_cnt--;
		break;
	case SCTP_V4_FLOW:
		pf->fd_sctp4_filter_cnt--;
		break;
	case IP_USER_FLOW:
		switch (filter->ip4_proto) {
		case IPPROTO_TCP:
			pf->fd_tcp4_filter_cnt--;
			break;
		case IPPROTO_UDP:
			pf->fd_udp4_filter_cnt--;
			break;
		case IPPROTO_SCTP:
			pf->fd_sctp4_filter_cnt--;
			break;
		case IPPROTO_IP:
			pf->fd_ip4_filter_cnt--;
			break;
		}
		break;
	}

	/* Remove the filter from the list and free memory */
	hlist_del(&filter->fdir_node);
	kfree(filter);
}
/**
 * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
 * @pf: board private structure
 **/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
	struct i40e_fdir_filter *filter;
	u32 fcnt_prog, fcnt_avail;
	struct hlist_node *node;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		return;

	/* Check if we have enough room to re-enable FDir SB capability. */
	fcnt_prog = i40e_get_global_fd_count(pf);
	fcnt_avail = pf->fdir_pf_filter_count;
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
	    (pf->fd_add_err == 0) ||
	    (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt))
		i40e_reenable_fdir_sb(pf);

	/* We should wait for even more space before re-enabling ATR.
	 * Additionally, we cannot enable ATR as long as we still have TCP SB
	 * rules active.
	 */
	if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) &&
	    (pf->fd_tcp4_filter_cnt == 0))
		i40e_reenable_fdir_atr(pf);

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node)
			if (filter->fd_id == pf->fd_inv)
				i40e_delete_invalid_filter(pf, filter);
	}
}
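
/* The re-enable policy above is deliberately asymmetric: sideband (SB)
 * filters come back as soon as any one of the three conditions shows
 * slack, while ATR additionally requires the larger ATR headroom and
 * zero TCPv4 SB rules, because ATR shares the TCPv4 input set with SB.
 */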
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr && !pf->fd_tcp4_filter_cnt)
			clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
		clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}
}
/**
 * i40e_get_current_atr_cnt - Get the count of total FD ATR filters programmed
 * @pf: board private structure
 **/
u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
{
	return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
}
/* We can see up to 256 filter programming descriptors in transit if the
 * filters are being applied really fast, before we see the first filter
 * miss error on Rx queue 0. Accumulating enough error messages before
 * reacting will make sure we don't cause a flush too often.
 */
#define I40E_MAX_FD_PROGRAM_ERROR 256

/**
 * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
 * @pf: board private structure
 **/
static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
{
	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state))
		return;

	if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state))
		i40e_fdir_flush_and_replay(pf);

	i40e_fdir_check_and_reenable(pf);
}
/**
 * i40e_vsi_link_event - notify VSI of a link event
 * @vsi: vsi to be notified
 * @link_up: link up or down
 **/
static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
{
	if (!vsi || test_bit(__I40E_VSI_DOWN, vsi->state))
		return;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		if (!vsi->netdev || !vsi->netdev_registered)
			break;

		if (link_up) {
			netif_carrier_on(vsi->netdev);
			netif_tx_wake_all_queues(vsi->netdev);
		} else {
			netif_carrier_off(vsi->netdev);
			netif_tx_stop_all_queues(vsi->netdev);
		}
		break;

	case I40E_VSI_SRIOV:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_CTRL:
	case I40E_VSI_IWARP:
	case I40E_VSI_MIRROR:
	default:
		/* there is no notification for other VSIs */
		break;
	}
}
/**
 * i40e_veb_link_event - notify elements on the veb of a link event
 * @veb: veb to be notified
 * @link_up: link up or down
 **/
static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
{
	struct i40e_pf *pf;
	int i;

	if (!veb || !veb->pf)
		return;
	pf = veb->pf;

	/* depth first... */
	for (i = 0; i < I40E_MAX_VEB; i++)
		if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
			i40e_veb_link_event(pf->veb[i], link_up);

	/* ... now the local VSIs */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
			i40e_vsi_link_event(pf->vsi[i], link_up);
}
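
/* The recursion above is bounded: the switch tree is a strict hierarchy
 * with at most I40E_MAX_VEB bridge elements, so the depth-first walk
 * terminates without any cycle detection.
 */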
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;
	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);
	status = i40e_get_link_status(&pf->hw, &new_link);

	/* On success, disable temp link polling */
	if (status == I40E_SUCCESS) {
		clear_bit(__I40E_TEMP_LINK_POLLING, pf->state);
	} else {
		/* Enable link polling temporarily until i40e_get_link_status
		 * returns I40E_SUCCESS
		 */
		set_bit(__I40E_TEMP_LINK_POLLING, pf->state);
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_VSI_DOWN, vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
/**
 * i40e_watchdog_subtask - periodic checks not using event driven response
 * @pf: board private structure
 **/
static void i40e_watchdog_subtask(struct i40e_pf *pf)
{
	int i;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, pf->state) ||
	    test_bit(__I40E_CONFIG_BUSY, pf->state))
		return;

	/* make sure we don't do these things too often */
	if (time_before(jiffies, (pf->service_timer_previous +
				  pf->service_timer_period)))
		return;
	pf->service_timer_previous = jiffies;

	if ((pf->flags & I40E_FLAG_LINK_POLLING_ENABLED) ||
	    test_bit(__I40E_TEMP_LINK_POLLING, pf->state))
		i40e_link_event(pf);

	/* Update the stats for active netdevs so the network stack
	 * can look at updated numbers whenever it cares to
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++)
		if (pf->vsi[i] && pf->vsi[i]->netdev)
			i40e_update_stats(pf->vsi[i]);

	if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
		/* Update the stats for the active switching components */
		for (i = 0; i < I40E_MAX_VEB; i++)
			if (pf->veb[i])
				i40e_update_veb_stats(pf->veb[i]);
	}

	i40e_ptp_rx_hang(pf);
	i40e_ptp_tx_hang(pf);
}
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	if (test_bit(__I40E_REINIT_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) {
		reset_flags |= BIT(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) {
		i40e_prep_for_reset(pf, false);
		i40e_reset(pf);
		i40e_rebuild(pf, false, false);
	}

	/* If we're already down or resetting, just bail */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, pf->state)) {
		i40e_do_reset(pf, reset_flags, false);
	}
}
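
/* Each *_REQUESTED state bit is latched into reset_flags and cleared
 * before any reset work starts, so a request that arrives while this
 * subtask runs is not lost; it is simply handled on the next
 * service-task pass.
 */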
/**
 * i40e_handle_link_event - Handle link event
 * @pf: board private structure
 * @e: event info posted on ARQ
 **/
static void i40e_handle_link_event(struct i40e_pf *pf,
				   struct i40e_arq_event_info *e)
{
	struct i40e_aqc_get_link_status *status =
		(struct i40e_aqc_get_link_status *)&e->desc.params.raw;

	/* Do a new status request to re-enable LSE reporting
	 * and load new status information into the hw struct
	 * This completely ignores any state information
	 * in the ARQ event info, instead choosing to always
	 * issue the AQ update link status command.
	 */
	i40e_link_event(pf);

	/* Check if module meets thermal requirements */
	if (status->phy_type == I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP) {
		dev_err(&pf->pdev->dev,
			"Rx/Tx is disabled on this device because the module does not meet thermal requirements.\n");
		dev_err(&pf->pdev->dev,
			"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
	} else {
		/* check for unqualified module, if link is down, suppress
		 * the message if link was forced to be down.
		 */
		if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
		    (!(status->link_info & I40E_AQ_LINK_UP)) &&
		    (!(pf->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED))) {
			dev_err(&pf->pdev->dev,
				"Rx/Tx is disabled on this device because an unsupported SFP module type was detected.\n");
			dev_err(&pf->pdev->dev,
				"Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
		}
	}
}
/**
 * i40e_clean_adminq_subtask - Clean the AdminQ rings
 * @pf: board private structure
 **/
static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
{
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	u16 pending, i = 0;
	i40e_status ret;
	u16 opcode;
	u32 oldval;
	u32 val;

	/* Do not run clean AQ when PF reset fails */
	if (test_bit(__I40E_RESET_FAILED, pf->state))
		return;

	/* check for error indications */
	val = rd32(&pf->hw, pf->hw.aq.arq.len);
	oldval = val;
	if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
	}
	if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
		pf->arq_overflows++;
	}
	if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
		if (hw->debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
		val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.arq.len, val);

	val = rd32(&pf->hw, pf->hw.aq.asq.len);
	oldval = val;
	if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
	}
	if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
		if (pf->hw.debug_mask & I40E_DEBUG_AQ)
			dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
		val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
	}
	if (oldval != val)
		wr32(&pf->hw, pf->hw.aq.asq.len, val);

	event.buf_len = I40E_MAX_AQ_BUF_SIZE;
	event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
	if (!event.msg_buf)
		return;

	do {
		ret = i40e_clean_arq_element(hw, &event, &pending);
		if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
			break;
		else if (ret) {
			dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
			break;
		}

		opcode = le16_to_cpu(event.desc.opcode);
		switch (opcode) {

		case i40e_aqc_opc_get_link_status:
			i40e_handle_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
			ret = i40e_vc_process_vf_msg(pf,
					le16_to_cpu(event.desc.retval),
					le32_to_cpu(event.desc.cookie_high),
					le32_to_cpu(event.desc.cookie_low),
					event.msg_buf,
					event.msg_len);
			break;
		case i40e_aqc_opc_lldp_update_mib:
			dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
#ifdef CONFIG_I40E_DCB
			rtnl_lock();
			ret = i40e_handle_lldp_event(pf, &event);
			rtnl_unlock();
#endif /* CONFIG_I40E_DCB */
			break;
		case i40e_aqc_opc_event_lan_overflow:
			dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
			i40e_handle_lan_overflow_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_peer:
			dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
			break;
		case i40e_aqc_opc_nvm_erase:
		case i40e_aqc_opc_nvm_update:
		case i40e_aqc_opc_oem_post_update:
			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
				   "ARQ NVM operation 0x%04x completed\n",
				   opcode);
			break;
		default:
			dev_info(&pf->pdev->dev,
				 "ARQ: Unknown event 0x%04x ignored\n",
				 opcode);
			break;
		}
	} while (i++ < pf->adminq_work_limit);

	if (i < pf->adminq_work_limit)
		clear_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state);

	/* re-enable Admin queue interrupt cause */
	val = rd32(hw, I40E_PFINT_ICR0_ENA);
	val |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	kfree(event.msg_buf);
}
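
/* The ARQ drain loop above is bounded by adminq_work_limit; if that
 * limit was hit, __I40E_ADMINQ_EVENT_PENDING stays set so the service
 * task reschedules and drains the remainder on the next pass.
 */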
/**
 * i40e_verify_eeprom - make sure eeprom is good to use
 * @pf: board private structure
 **/
static void i40e_verify_eeprom(struct i40e_pf *pf)
{
	int err;

	err = i40e_diag_eeprom_test(&pf->hw);
	if (err) {
		/* retry in case of garbage read */
		err = i40e_diag_eeprom_test(&pf->hw);
		if (err) {
			dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
				 err);
			set_bit(__I40E_BAD_EEPROM, pf->state);
		}
	}

	if (!err && test_bit(__I40E_BAD_EEPROM, pf->state)) {
		dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
		clear_bit(__I40E_BAD_EEPROM, pf->state);
	}
}
/**
 * i40e_enable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * enable switch loop back or die - no point in a return value
 **/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}
/**
 * i40e_disable_pf_switch_lb
 * @pf: pointer to the PF structure
 *
 * disable switch loop back or die - no point in a return value
 **/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_vsi_context ctxt;
	int ret;

	ctxt.seid = pf->main_vsi_seid;
	ctxt.pf_num = pf->hw.pf_id;
	ctxt.vf_num = 0;
	ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return;
	}
	ctxt.flags = I40E_AQ_VSI_TYPE_PF;
	ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
	ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);

	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "update vsi switch failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}
}
/**
 * i40e_config_bridge_mode - Configure the HW bridge mode
 * @veb: pointer to the bridge instance
 *
 * Configure the loop back mode for the LAN VSI that is downlink to the
 * specified HW bridge instance. It is expected this function is called
 * when a new HW bridge is instantiated.
 **/
static void i40e_config_bridge_mode(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;

	if (pf->hw.debug_mask & I40E_DEBUG_LAN)
		dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
			 veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		i40e_disable_pf_switch_lb(pf);
	else
		i40e_enable_pf_switch_lb(pf);
}
/**
 * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
 * @veb: pointer to the VEB instance
 *
 * This is a recursive function that first builds the attached VSIs then
 * recurses in to build the next layer of VEB.  We track the connections
 * through our own index numbers because the seid's from the HW could
 * change across the reset.
 **/
static int i40e_reconstitute_veb(struct i40e_veb *veb)
{
	struct i40e_vsi *ctl_vsi = NULL;
	struct i40e_pf *pf = veb->pf;
	int v, veb_idx;
	int ret;

	/* build VSI that owns this VEB, temporarily attached to base VEB */
	for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
		if (pf->vsi[v] &&
		    pf->vsi[v]->veb_idx == veb->idx &&
		    pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
			ctl_vsi = pf->vsi[v];
			break;
		}
	}
	if (!ctl_vsi) {
		dev_info(&pf->pdev->dev,
			 "missing owner VSI for veb_idx %d\n", veb->idx);
		ret = -ENOENT;
		goto end_reconstitute;
	}
	if (ctl_vsi != pf->vsi[pf->lan_vsi])
		ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
	ret = i40e_add_vsi(ctl_vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "rebuild of veb_idx %d owner VSI failed: %d\n",
			 veb->idx, ret);
		goto end_reconstitute;
	}
	i40e_vsi_reset_stats(ctl_vsi);

	/* create the VEB in the switch and move the VSI onto the VEB */
	ret = i40e_add_veb(veb, ctl_vsi);
	if (ret)
		goto end_reconstitute;

	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		veb->bridge_mode = BRIDGE_MODE_VEB;
	else
		veb->bridge_mode = BRIDGE_MODE_VEPA;
	i40e_config_bridge_mode(veb);

	/* create the remaining VSIs attached to this VEB */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
			continue;

		if (pf->vsi[v]->veb_idx == veb->idx) {
			struct i40e_vsi *vsi = pf->vsi[v];

			vsi->uplink_seid = veb->seid;
			ret = i40e_add_vsi(vsi);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "rebuild of vsi_idx %d failed: %d\n",
					 v, ret);
				goto end_reconstitute;
			}
			i40e_vsi_reset_stats(vsi);
		}
	}

	/* create any VEBs attached to this VEB - RECURSION */
	for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
		if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
			pf->veb[veb_idx]->uplink_seid = veb->seid;
			ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
			if (ret)
				break;
		}
	}

end_reconstitute:
	return ret;
}
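
/* Rebuild order in the function above matters: the owner VSI must exist
 * before the VEB can be created in the switch, member VSIs attach once
 * the VEB has a valid seid, and nested VEBs are recreated last via
 * recursion.
 */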
/**
 * i40e_get_capabilities - get info about the HW
 * @pf: the PF struct
 * @list_type: AQ capability to be queried
 **/
static int i40e_get_capabilities(struct i40e_pf *pf,
				 enum i40e_admin_queue_opc list_type)
{
	struct i40e_aqc_list_capabilities_element_resp *cap_buf;
	u16 data_size;
	int buf_len;
	int err;

	buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
	do {
		cap_buf = kzalloc(buf_len, GFP_KERNEL);
		if (!cap_buf)
			return -ENOMEM;

		/* this loads the data into the hw struct for us */
		err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
						    &data_size, list_type,
						    NULL);
		/* data loaded, buffer no longer needed */
		kfree(cap_buf);

		if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
			/* retry with a larger buffer */
			buf_len = data_size;
		} else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
			dev_info(&pf->pdev->dev,
				 "capability discovery failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENODEV;
		}
	} while (err);

	if (pf->hw.debug_mask & I40E_DEBUG_USER) {
		if (list_type == i40e_aqc_opc_list_func_capabilities) {
			dev_info(&pf->pdev->dev,
				 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
				 pf->hw.pf_id, pf->hw.func_caps.num_vfs,
				 pf->hw.func_caps.num_msix_vectors,
				 pf->hw.func_caps.num_msix_vectors_vf,
				 pf->hw.func_caps.fd_filters_guaranteed,
				 pf->hw.func_caps.fd_filters_best_effort,
				 pf->hw.func_caps.num_tx_qp,
				 pf->hw.func_caps.num_vsis);
		} else if (list_type == i40e_aqc_opc_list_dev_capabilities) {
			dev_info(&pf->pdev->dev,
				 "switch_mode=0x%04x, function_valid=0x%08x\n",
				 pf->hw.dev_caps.switch_mode,
				 pf->hw.dev_caps.valid_functions);
			dev_info(&pf->pdev->dev,
				 "SR-IOV=%d, num_vfs for all function=%u\n",
				 pf->hw.dev_caps.sr_iov_1_1,
				 pf->hw.dev_caps.num_vfs);
			dev_info(&pf->pdev->dev,
				 "num_vsis=%u, num_rx:%u, num_tx=%u\n",
				 pf->hw.dev_caps.num_vsis,
				 pf->hw.dev_caps.num_rx_qp,
				 pf->hw.dev_caps.num_tx_qp);
		}
	}
	if (list_type == i40e_aqc_opc_list_func_capabilities) {
#define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
		       + pf->hw.func_caps.num_vfs)
		if (pf->hw.revision_id == 0 &&
		    pf->hw.func_caps.num_vsis < DEF_NUM_VSI) {
			dev_info(&pf->pdev->dev,
				 "got num_vsis %d, setting num_vsis to %d\n",
				 pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
			pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
		}
	}
	return 0;
}
static int i40e_vsi_clear(struct i40e_vsi *vsi);

/**
 * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
 * @pf: board private structure
 **/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	/* quick workaround for an NVM issue that leaves a critical register
	 * uninitialized
	 */
	if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
		static const u32 hkey[] = {
			0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
			0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
			0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
			0x95b3a76d};
		int i;

		for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
			wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
	}

	if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
		return;

	/* find existing VSI and see if it needs configuring */
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);

	/* create a new VSI if none exists */
	if (!vsi) {
		vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
				     pf->vsi[pf->lan_vsi]->seid, 0);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			return;
		}
	}

	i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
 * i40e_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 **/
static void i40e_fdir_teardown(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi;

	i40e_fdir_filter_exit(pf);
	vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
	if (vsi)
		i40e_vsi_release(vsi);
}
/**
 * i40e_rebuild_cloud_filters - Rebuilds cloud filters for VSIs
 * @vsi: PF main vsi
 * @seid: seid of main or channel VSIs
 *
 * Rebuilds cloud filters associated with main VSI and channel VSIs if they
 * existed before reset
 **/
static int i40e_rebuild_cloud_filters(struct i40e_vsi *vsi, u16 seid)
{
	struct i40e_cloud_filter *cfilter;
	struct i40e_pf *pf = vsi->back;
	struct hlist_node *node;
	i40e_status ret;

	/* Add cloud filters back if they exist */
	hlist_for_each_entry_safe(cfilter, node, &pf->cloud_filter_list,
				  cloud_node) {
		if (cfilter->seid != seid)
			continue;

		if (cfilter->dst_port)
			ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
								true);
		else
			ret = i40e_add_del_cloud_filter(vsi, cfilter, true);

		if (ret) {
			dev_dbg(&pf->pdev->dev,
				"Failed to rebuild cloud filter, err %s aq_err %s\n",
				i40e_stat_str(&pf->hw, ret),
				i40e_aq_str(&pf->hw,
					    pf->hw.aq.asq_last_status));
			return ret;
		}
	}
	return 0;
}
/**
 * i40e_rebuild_channels - Rebuilds channel VSIs if they existed before reset
 * @vsi: PF main vsi
 *
 * Rebuilds channel VSIs if they existed before reset
 **/
static int i40e_rebuild_channels(struct i40e_vsi *vsi)
{
	struct i40e_channel *ch, *ch_tmp;
	i40e_status ret;

	if (list_empty(&vsi->ch_list))
		return 0;

	list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
		if (!ch->initialized)
			break;
		/* Proceed with creation of channel (VMDq2) VSI */
		ret = i40e_add_channel(vsi->back, vsi->uplink_seid, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to rebuild channels using uplink_seid %u\n",
				 vsi->uplink_seid);
			return ret;
		}
		/* Reconfigure TX queues using QTX_CTL register */
		ret = i40e_channel_config_tx_ring(vsi->back, vsi, ch);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "failed to configure TX rings for channel %u\n",
				 ch->seid);
			return ret;
		}
		/* update 'next_base_queue' */
		vsi->next_base_queue = vsi->next_base_queue +
							ch->num_queue_pairs;
		if (ch->max_tx_rate) {
			u64 credits = ch->max_tx_rate;

			if (i40e_set_bw_limit(vsi, ch->seid,
					      ch->max_tx_rate))
				return -EINVAL;

			do_div(credits, I40E_BW_CREDIT_DIVISOR);
			dev_dbg(&vsi->back->pdev->dev,
				"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
				ch->max_tx_rate,
				credits,
				ch->seid);
		}
		ret = i40e_rebuild_cloud_filters(vsi, ch->seid);
		if (ret) {
			dev_dbg(&vsi->back->pdev->dev,
				"Failed to rebuild cloud filters for channel VSI %u\n",
				ch->seid);
			return ret;
		}
	}
	return 0;
}
/**
 * i40e_prep_for_reset - prep for the core to reset
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for PF Reset.
 **/
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret = 0;
	u32 v;

	clear_bit(__I40E_RESET_INTR_RECEIVED, pf->state);
	if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");

	/* quiesce the VSIs and their queues that are not already DOWN */
	/* pf_quiesce_all_vsi modifies netdev structures -rtnl_lock needed */
	if (!lock_acquired)
		rtnl_lock();
	i40e_pf_quiesce_all_vsi(pf);
	if (!lock_acquired)
		rtnl_unlock();

	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (pf->vsi[v])
			pf->vsi[v]->seid = 0;
	}

	i40e_shutdown_adminq(&pf->hw);

	/* call shutdown HMC */
	if (hw->hmc.hmc_obj) {
		ret = i40e_shutdown_lan_hmc(hw);
		if (ret)
			dev_warn(&pf->pdev->dev,
				 "shutdown_lan_hmc failed: %d\n", ret);
	}
}
/**
 * i40e_send_version - update firmware with driver version
 * @pf: PF struct
 **/
static void i40e_send_version(struct i40e_pf *pf)
{
	struct i40e_driver_version dv;

	dv.major_version = DRV_VERSION_MAJOR;
	dv.minor_version = DRV_VERSION_MINOR;
	dv.build_version = DRV_VERSION_BUILD;
	dv.subbuild_version = 0;
	strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
	i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
/**
 * i40e_get_oem_version - get OEM specific version information
 * @hw: pointer to the hardware structure
 **/
static void i40e_get_oem_version(struct i40e_hw *hw)
{
	u16 block_offset = 0xffff;
	u16 block_length = 0;
	u16 capabilities = 0;
	u16 gen_snap = 0;
	u16 release = 0;

#define I40E_SR_NVM_OEM_VERSION_PTR		0x1B
#define I40E_NVM_OEM_LENGTH_OFFSET		0x00
#define I40E_NVM_OEM_CAPABILITIES_OFFSET	0x01
#define I40E_NVM_OEM_GEN_OFFSET			0x02
#define I40E_NVM_OEM_RELEASE_OFFSET		0x03
#define I40E_NVM_OEM_CAPABILITIES_MASK		0x000F
#define I40E_NVM_OEM_LENGTH			3

	/* Check if pointer to OEM version block is valid. */
	i40e_read_nvm_word(hw, I40E_SR_NVM_OEM_VERSION_PTR, &block_offset);
	if (block_offset == 0xffff)
		return;

	/* Check if OEM version block has correct length. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_LENGTH_OFFSET,
			   &block_length);
	if (block_length < I40E_NVM_OEM_LENGTH)
		return;

	/* Check if OEM version format is as expected. */
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_CAPABILITIES_OFFSET,
			   &capabilities);
	if ((capabilities & I40E_NVM_OEM_CAPABILITIES_MASK) != 0)
		return;

	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_GEN_OFFSET,
			   &gen_snap);
	i40e_read_nvm_word(hw, block_offset + I40E_NVM_OEM_RELEASE_OFFSET,
			   &release);
	hw->nvm.oem_ver = (gen_snap << I40E_OEM_SNAP_SHIFT) | release;
	hw->nvm.eetrack = I40E_OEM_EETRACK_ID;
}
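
/* Packing note for the assignment above: the combined gen/snap NVM word
 * is shifted into the upper half of oem_ver and OR'd with the release
 * word. Illustrative example only: gen_snap = 0x0102 and release =
 * 0x0003 give oem_ver = 0x01020003, assuming I40E_OEM_SNAP_SHIFT is 16.
 */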
/**
 * i40e_reset - wait for core reset to finish reset, reset pf if corer not seen
 * @pf: board private structure
 **/
static int i40e_reset(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;

	ret = i40e_pf_reset(hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
		set_bit(__I40E_RESET_FAILED, pf->state);
		clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
	} else {
		pf->pfr_count++;
	}
	return ret;
}
/**
 * i40e_rebuild - rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	struct i40e_hw *hw = &pf->hw;
	u8 set_fc_aq_fail = 0;
	i40e_status ret;
	u32 val;
	int v;

	if (test_bit(__I40E_DOWN, pf->state))
		goto clear_recovery;
	dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");

	/* rebuild the basics for the AdminQ, HMC, and initial HW switch */
	ret = i40e_init_adminq(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto clear_recovery;
	}
	i40e_get_oem_version(&pf->hw);

	if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
	    ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
	     hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
		/* The following delay is necessary for 4.33 firmware and older
		 * to recover after EMP reset. 200 ms should suffice but we
		 * put here 300 ms to be sure that FW is ready to operate
		 * after reset.
		 */
		mdelay(300);
	}

	/* re-verify the eeprom if we just had an EMP reset */
	if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
		i40e_verify_eeprom(pf);

	i40e_clear_pxe_mode(hw);
	ret = i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities);
	if (ret)
		goto end_core_reset;

	ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp, 0, 0);
	if (ret) {
		dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}
	ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (ret) {
		dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
		goto end_core_reset;
	}

	/* Enable FW to write a default DCB config on link-up */
	i40e_aq_set_dcb_parameters(hw, true, NULL);

#ifdef CONFIG_I40E_DCB
	ret = i40e_init_pf_dcb(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */
	/* do basic switch setup */
	if (!lock_acquired)
		rtnl_lock();
	ret = i40e_setup_pf_switch(pf, reinit);
	if (ret)
		goto end_unlock;

	/* The driver only wants link up/down and module qualification
	 * reports from firmware.  Note the negative logic.
	 */
	ret = i40e_aq_set_phy_int_mask(&pf->hw,
				       ~(I40E_AQ_EVENT_LINK_UPDOWN |
					 I40E_AQ_EVENT_MEDIA_NA |
					 I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
	if (ret)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure our flow control settings are restored */
	ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
	if (ret)
		dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
			i40e_stat_str(&pf->hw, ret),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Rebuild the VSIs and VEBs that existed before reset.
	 * They are still in our local switch element arrays, so only
	 * need to rebuild the switch model in the HW.
	 *
	 * If there were VEBs but the reconstitution failed, we'll try
	 * to recover minimal use by getting the basic PF VSI working.
	 */
	if (vsi->uplink_seid != pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
		/* find the one VEB connected to the MAC, and find orphans */
		for (v = 0; v < I40E_MAX_VEB; v++) {
			if (!pf->veb[v])
				continue;

			if (pf->veb[v]->uplink_seid == pf->mac_seid ||
			    pf->veb[v]->uplink_seid == 0) {
				ret = i40e_reconstitute_veb(pf->veb[v]);

				if (!ret)
					continue;

				/* If Main VEB failed, we're in deep doodoo,
				 * so give up rebuilding the switch and set up
				 * for minimal rebuild of PF VSI.
				 * If orphan failed, we'll report the error
				 * but try to keep going.
				 */
				if (pf->veb[v]->uplink_seid == pf->mac_seid) {
					dev_info(&pf->pdev->dev,
						 "rebuild of switch failed: %d, will try to set up simple PF connection\n",
						 ret);
					vsi->uplink_seid = pf->mac_seid;
					break;
				} else if (pf->veb[v]->uplink_seid == 0) {
					dev_info(&pf->pdev->dev,
						 "rebuild of orphan VEB failed: %d\n",
						 ret);
				}
			}
		}
	}

	if (vsi->uplink_seid == pf->mac_seid) {
		dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
		/* no VEB, so rebuild only the Main VSI */
		ret = i40e_add_vsi(vsi);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "rebuild of Main VSI failed: %d\n", ret);
			goto end_unlock;
		}
	}

	if (vsi->mqprio_qopt.max_rate[0]) {
		u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
		u64 credits = 0;

		do_div(max_tx_rate, I40E_BW_MBPS_DIVISOR);
		ret = i40e_set_bw_limit(vsi, vsi->seid, max_tx_rate);
		if (ret)
			goto end_unlock;

		credits = max_tx_rate;
		do_div(credits, I40E_BW_CREDIT_DIVISOR);
		dev_dbg(&vsi->back->pdev->dev,
			"Set tx rate of %llu Mbps (count of 50Mbps %llu) for vsi->seid %u\n",
			max_tx_rate,
			credits,
			vsi->seid);
	}

	ret = i40e_rebuild_cloud_filters(vsi, vsi->seid);
	if (ret)
		goto end_unlock;

	/* PF Main VSI is rebuild by now, go ahead and rebuild channel VSIs
	 * for this main VSI if they exist
	 */
	ret = i40e_rebuild_channels(vsi);
	if (ret)
		goto end_unlock;

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
#define I40E_REG_MSS          0x000E64DC
#define I40E_REG_MSS_MIN_MASK 0x3FF0000
#define I40E_64BYTE_MSS       0x400000
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	if (pf->hw_features & I40E_HW_RESTART_AUTONEG) {
		msleep(75);
		ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (ret)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* reinit the misc interrupt */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		ret = i40e_setup_misc_vector(pf);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* restart the VSIs that were rebuilt and running before the reset */
	i40e_pf_unquiesce_all_vsi(pf);

	/* Release the RTNL lock before we start resetting VFs */
	if (!lock_acquired)
		rtnl_unlock();

	/* Restore promiscuous settings */
	ret = i40e_set_promiscuous(pf, pf->cur_promisc);
	if (ret)
		dev_warn(&pf->pdev->dev,
			 "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
			 pf->cur_promisc ? "on" : "off",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	i40e_reset_all_vfs(pf, true);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* We've already released the lock, so don't do it again */
	goto end_core_reset;

end_unlock:
	if (!lock_acquired)
		rtnl_unlock();
end_core_reset:
	clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
	clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
}
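
/* On the MSS clamp in i40e_rebuild() above: I40E_REG_MSS_MIN_MASK
 * (0x3FF0000) selects the minimum-MSS field at bits 16-25, and
 * I40E_64BYTE_MSS (0x400000) is 64 << 16, so any configured minimum
 * above 64 bytes is rewritten to exactly 64 bytes to keep small-MSS
 * TSO from triggering MDD resets.
 */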
/**
 * i40e_reset_and_rebuild - reset and rebuild using a saved config
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 **/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
				   bool lock_acquired)
{
	int ret;
	/* Now we wait for GRST to settle out.
	 * We don't have to delete the VEBs or VSIs from the hw switch
	 * because the reset will make them disappear.
	 */
	ret = i40e_reset(pf);
	if (!ret)
		i40e_rebuild(pf, reinit, lock_acquired);
}
/**
 * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
 * @pf: board private structure
 * @lock_acquired: indicates whether or not the lock has been acquired
 * before this function was called.
 *
 * Close up the VFs and other things in prep for a Core Reset,
 * then get ready to rebuild the world.
 **/
static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired)
{
	i40e_prep_for_reset(pf, lock_acquired);
	i40e_reset_and_rebuild(pf, false, lock_acquired);
}
/**
 * i40e_handle_mdd_event
 * @pf: pointer to the PF structure
 *
 * Called from the MDD irq handler to identify possibly malicious vfs
 **/
static void i40e_handle_mdd_event(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	bool mdd_detected = false;
	bool pf_mdd_detected = false;
	struct i40e_vf *vf;
	u32 reg;
	int i;

	if (!test_bit(__I40E_MDD_EVENT_PENDING, pf->state))
		return;

	/* find what triggered the MDD event */
	reg = rd32(hw, I40E_GL_MDET_TX);
	if (reg & I40E_GL_MDET_TX_VALID_MASK) {
		u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
				I40E_GL_MDET_TX_PF_NUM_SHIFT;
		u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
				I40E_GL_MDET_TX_VF_NUM_SHIFT;
		u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
				I40E_GL_MDET_TX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
				I40E_GL_MDET_TX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_tx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
				 event, queue, pf_num, vf_num);
		wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
		mdd_detected = true;
	}
	reg = rd32(hw, I40E_GL_MDET_RX);
	if (reg & I40E_GL_MDET_RX_VALID_MASK) {
		u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
				I40E_GL_MDET_RX_FUNCTION_SHIFT;
		u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
				I40E_GL_MDET_RX_EVENT_SHIFT;
		u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
				I40E_GL_MDET_RX_QUEUE_SHIFT) -
				pf->hw.func_caps.base_queue;
		if (netif_msg_rx_err(pf))
			dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
				 event, queue, func);
		wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
		mdd_detected = true;
	}

	if (mdd_detected) {
		reg = rd32(hw, I40E_PF_MDET_TX);
		if (reg & I40E_PF_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
			dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		reg = rd32(hw, I40E_PF_MDET_RX);
		if (reg & I40E_PF_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
			dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
			pf_mdd_detected = true;
		}
		/* Queue belongs to the PF, initiate a reset */
		if (pf_mdd_detected) {
			set_bit(__I40E_PF_RESET_REQUESTED, pf->state);
			i40e_service_event_schedule(pf);
		}
	}

	/* see if one of the VFs needs its hand slapped */
	for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
		vf = &(pf->vf[i]);
		reg = rd32(hw, I40E_VP_MDET_TX(i));
		if (reg & I40E_VP_MDET_TX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
				 i);
		}

		reg = rd32(hw, I40E_VP_MDET_RX(i));
		if (reg & I40E_VP_MDET_RX_VALID_MASK) {
			wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
			vf->num_mdd_events++;
			dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
				 i);
		}

		if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
			dev_info(&pf->pdev->dev,
				 "Too many MDD events on VF %d, disabled\n", i);
			dev_info(&pf->pdev->dev,
				 "Use PF Control I/F to re-enable the VF\n");
			set_bit(I40E_VF_STATE_DISABLED, &vf->vf_states);
		}
	}

	/* re-enable mdd interrupt cause */
	clear_bit(__I40E_MDD_EVENT_PENDING, pf->state);
	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
	reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
	i40e_flush(hw);
}
static const char *i40e_tunnel_name(u8 type)
{
	switch (type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		return "vxlan";
	case UDP_TUNNEL_TYPE_GENEVE:
		return "geneve";
	default:
		return "unknown";
	}
}
/**
 * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters(struct i40e_pf *pf)
{
	int i;

	/* loop through and set pending bit for all active UDP filters */
	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->udp_ports[i].port)
			pf->pending_udp_bitmap |= BIT_ULL(i);
	}

	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}
/**
 * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 **/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u8 filter_index, type;
	u16 port;
	int i;

	if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
		return;

	/* acquire RTNL to maintain state of flags and port requests */
	rtnl_lock();

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_udp_bitmap & BIT_ULL(i)) {
			struct i40e_udp_port_config *udp_port;
			i40e_status ret = 0;

			udp_port = &pf->udp_ports[i];
			pf->pending_udp_bitmap &= ~BIT_ULL(i);

			port = READ_ONCE(udp_port->port);
			type = READ_ONCE(udp_port->type);
			filter_index = READ_ONCE(udp_port->filter_index);

			/* release RTNL while we wait on AQ command */
			rtnl_unlock();

			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, port,
							     type,
							     &filter_index,
							     NULL);
			else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
				ret = i40e_aq_del_udp_tunnel(hw, filter_index,
							     NULL);

			/* reacquire RTNL so we can update filter_index */
			rtnl_lock();

			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s %s port %d, index %d failed, err %s aq_err %s\n",
					 i40e_tunnel_name(type),
					 port ? "add" : "delete",
					 port,
					 filter_index,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				if (port) {
					/* failed to add, just reset port,
					 * drop pending bit for any deletion
					 */
					udp_port->port = 0;
					pf->pending_udp_bitmap &= ~BIT_ULL(i);
				}
			} else if (port) {
				/* record filter index on success */
				udp_port->filter_index = filter_index;
			}
		}
	}

	rtnl_unlock();
}
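
/* Note the locking dance above: RTNL is dropped around the blocking
 * admin-queue add/delete calls and reacquired only to publish
 * filter_index, keeping AQ latency out of the RTNL critical section.
 */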
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
		return;

	if (test_and_set_bit(__I40E_SERVICE_SCHED, pf->state))
		return;

	i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
	i40e_sync_filters_subtask(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
		/* Client subtask will reopen next time through. */
		i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], true);
	} else {
		i40e_client_subtask(pf);
		if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
				       pf->state))
			i40e_notify_client_of_l2_param_changes(
							pf->vsi[pf->lan_vsi]);
	}
	i40e_sync_filters_subtask(pf);
	i40e_sync_udp_filters_subtask(pf);
	i40e_clean_adminq_subtask(pf);

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, pf->state);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_MDD_EVENT_PENDING, pf->state) ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, pf->state))
		i40e_service_event_schedule(pf);
}
/**
 * i40e_service_timer - timer callback
 * @t: pointer to the timer_list embedded in the PF struct
 **/
static void i40e_service_timer(struct timer_list *t)
{
	struct i40e_pf *pf = from_timer(pf, t, service_timer);

	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fdsb_msix;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}
/**
 * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
 * @vsi: VSI pointer
 * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
 *
 * On error: returns error code (negative)
 * On success: returns 0
 **/
static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
{
	struct i40e_ring **next_rings;
	int size;
	int ret = 0;

	/* allocate memory for both Tx, XDP Tx and Rx ring pointers */
	size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs *
	       (i40e_enabled_xdp_vsi(vsi) ? 3 : 2);
	vsi->tx_rings = kzalloc(size, GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;
	next_rings = vsi->tx_rings + vsi->alloc_queue_pairs;
	if (i40e_enabled_xdp_vsi(vsi)) {
		vsi->xdp_rings = next_rings;
		next_rings += vsi->alloc_queue_pairs;
	}
	vsi->rx_rings = next_rings;

	if (alloc_qvectors) {
		/* allocate memory for q_vector pointers */
		size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
		vsi->q_vectors = kzalloc(size, GFP_KERNEL);
		if (!vsi->q_vectors) {
			ret = -ENOMEM;
			goto err_vectors;
		}
	}
	return ret;

err_vectors:
	kfree(vsi->tx_rings);
	return ret;
}
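
/* Layout note: a single allocation above holds all the ring pointers,
 * ordered [tx_rings | xdp_rings (optional) | rx_rings], which is why
 * only tx_rings is kfree'd on error and the other pointers are merely
 * cleared in i40e_vsi_free_arrays().
 */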
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_VSI_DOWN, vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->int_rate_limit = 0;
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	hash_init(vsi->mac_filter_hash);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_hash_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
	vsi->xdp_rings = NULL;
}
/**
 * i40e_clear_rss_config_user - clear the user configured RSS hash keys
 * and lookup table
 * @vsi: Pointer to VSI structure
 */
static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
{
	if (!vsi)
		return;

	kfree(vsi->rss_hkey_user);
	vsi->rss_hkey_user = NULL;

	kfree(vsi->rss_lut_user);
	vsi->rss_lut_user = NULL;
}
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
			vsi->idx, vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);
	i40e_clear_rss_config_user(vsi);

	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
/**
 * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being cleaned
 **/
static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{
	int i;

	if (vsi->tx_rings && vsi->tx_rings[0]) {
		for (i = 0; i < vsi->alloc_queue_pairs; i++) {
			kfree_rcu(vsi->tx_rings[i], rcu);
			vsi->tx_rings[i] = NULL;
			vsi->rx_rings[i] = NULL;
			if (vsi->xdp_rings)
				vsi->xdp_rings[i] = NULL;
		}
	}
}
/**
 * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
 * @vsi: the VSI being configured
 **/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
	int i, qpv = i40e_enabled_xdp_vsi(vsi) ? 3 : 2;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	/* Set basic values in the rings to be used later during open() */
	for (i = 0; i < vsi->alloc_queue_pairs; i++) {
		/* allocate space for both Tx and Rx in one shot */
		ring = kcalloc(qpv, sizeof(struct i40e_ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		ring->itr_setting = pf->tx_itr_default;
		vsi->tx_rings[i] = ring++;

		if (!i40e_enabled_xdp_vsi(vsi))
			goto setup_rx;

		ring->queue_index = vsi->alloc_queue_pairs + i;
		ring->reg_idx = vsi->base_queue + ring->queue_index;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = NULL;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
			ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
		set_ring_xdp(ring);
		ring->itr_setting = pf->tx_itr_default;
		vsi->xdp_rings[i] = ring++;

setup_rx:
		ring->queue_index = i;
		ring->reg_idx = vsi->base_queue + i;
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = &pf->pdev->dev;
		ring->count = vsi->num_desc;
		ring->size = 0;
		ring->dcb_tc = 0;
		ring->itr_setting = pf->rx_itr_default;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	i40e_vsi_clear_rings(vsi);
	return -ENOMEM;
}
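
/* Memory layout per queue pair above: one kcalloc(qpv, ...) block holds
 * the Tx ring, the optional XDP Tx ring, and the Rx ring back to back,
 * so the two (or three) ring structures live in one allocation and are
 * freed through the Tx ring pointer alone.
 */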
/**
 * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
 * @pf: board private structure
 * @vectors: the number of MSI-X vectors to request
 *
 * Returns the number of vectors reserved, or error
 **/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
	vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
					I40E_MIN_MSIX, vectors);
	if (vectors < 0) {
		dev_info(&pf->pdev->dev,
			 "MSI-X vector reservation failed: %d\n", vectors);
		vectors = 0;
	}

	return vectors;
}
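
/* pci_enable_msix_range() above may grant any count between
 * I40E_MIN_MSIX and the requested total, returning the number actually
 * allocated (or a negative errno), which is why i40e_init_msix() below
 * must re-budget when v_actual < v_budget.
 */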
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int cpus, extra_vectors;
	int vectors_left;
	int v_budget, i;
	int v_actual;
	int iwarp_requested = 0;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *	- Queues being used for RSS.
	 *		We don't need as many as max_rss_size vectors.
	 *		Use rss_size instead in the calculation since that
	 *		is governed by number of cpus in the system.
	 *	- assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
	 *   - The CPU count within the NUMA node if iWARP is enabled
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again. If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve some vectors for the main PF traffic queues. Initially we
	 * only reserve at most 50% of the available vectors, in the case that
	 * the number of online CPUs is large. This ensures that we can enable
	 * extra features as well. Once we've enabled the other features, we
	 * will use any remaining vectors to reach as close as we can to the
	 * number of online CPUs.
	 */
	cpus = num_online_cpus();
	pf->num_lan_msix = min_t(int, cpus, vectors_left / 2);
	vectors_left -= pf->num_lan_msix;

	/* reserve one vector for sideband flow director */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			pf->num_fdsb_msix = 1;
			v_budget++;
			vectors_left--;
		} else {
			pf->num_fdsb_msix = 0;
		}
	}

	/* can we reserve enough for iWARP? */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		iwarp_requested = pf->num_iwarp_msix;

		if (!vectors_left)
			pf->num_iwarp_msix = 0;
		else if (vectors_left < pf->num_iwarp_msix)
			pf->num_iwarp_msix = 1;
		v_budget += pf->num_iwarp_msix;
		vectors_left -= pf->num_iwarp_msix;
	}

	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		if (!vectors_left) {
			pf->num_vmdq_msix = 0;
			pf->num_vmdq_qps = 0;
		} else {
			int vmdq_vecs_wanted =
				pf->num_vmdq_vsis * pf->num_vmdq_qps;
			int vmdq_vecs =
				min_t(int, vectors_left, vmdq_vecs_wanted);

			/* if we're short on vectors for what's desired, we limit
			 * the queues per vmdq. If this is still more than are
			 * available, the user will need to change the number of
			 * queues/vectors used by the PF later with the ethtool
			 * channels command
			 */
			if (vectors_left < vmdq_vecs_wanted) {
				pf->num_vmdq_qps = 1;
				vmdq_vecs_wanted = pf->num_vmdq_vsis;
				vmdq_vecs = min_t(int,
						  vectors_left,
						  vmdq_vecs_wanted);
			}
			pf->num_vmdq_msix = pf->num_vmdq_qps;

			v_budget += vmdq_vecs;
			vectors_left -= vmdq_vecs;
		}
	}

	/* On systems with a large number of SMP cores, we previously limited
	 * the number of vectors for num_lan_msix to be at most 50% of the
	 * available vectors, to allow for other features. Now, we add back
	 * the remaining vectors. However, we ensure that the total
	 * num_lan_msix will not exceed num_online_cpus(). To do this, we
	 * calculate the number of vectors we can add without going over the
	 * cap of CPUs. For systems with a small number of CPUs this will be
	 * zero.
	 */
	extra_vectors = min_t(int, cpus - pf->num_lan_msix, vectors_left);
	pf->num_lan_msix += extra_vectors;
	vectors_left -= extra_vectors;

	WARN(vectors_left < 0,
	     "Calculation of remaining vectors underflowed. This is an accounting bug when determining total MSI-X vectors.\n");

	v_budget += pf->num_lan_msix;
	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual < I40E_MIN_MSIX) {
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		pci_disable_msix(pf->pdev);
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
		int vec;

		dev_info(&pf->pdev->dev,
			 "MSI-X vector limit reached with %d, wanted %d, attempting to redistribute vectors\n",
			 v_actual, v_budget);
		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_iwarp_msix = 1;
			} else {
				pf->num_lan_msix = 2;
			}
			break;
		default:
			if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
				pf->num_iwarp_msix = min_t(int, (vec / 3),
							   iwarp_requested);
				pf->num_vmdq_vsis = min_t(int, (vec / 3),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			} else {
				pf->num_vmdq_vsis = min_t(int, (vec / 2),
							  I40E_DEFAULT_NUM_VMDQ_VSI);
			}
			if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
				pf->num_fdsb_msix = 1;
				vec--;
			}
			pf->num_lan_msix = min_t(int,
			       (vec - (pf->num_iwarp_msix + pf->num_vmdq_vsis)),
							      pf->num_lan_msix);
			pf->num_lan_qps = pf->num_lan_msix;
			break;
		}
	}

	if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
	    (pf->num_fdsb_msix == 0)) {
		dev_info(&pf->pdev->dev, "Sideband Flowdir disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	}
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (pf->num_iwarp_msix == 0)) {
		dev_info(&pf->pdev->dev, "IWARP disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_IWARP_ENABLED;
	}
	i40e_debug(&pf->hw, I40E_DEBUG_INIT,
		   "MSI-X vector distribution: PF %d, VMDq %d, FDSB %d, iWARP %d\n",
		   pf->num_lan_msix,
		   pf->num_vmdq_msix * pf->num_vmdq_vsis,
		   pf->num_fdsb_msix,
		   pf->num_iwarp_msix);

	return v_actual;
}
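/* Worked example of the budgeting above (illustrative numbers): with
 * hw->func_caps.num_msix_vectors = 64, 16 online CPUs, flow director
 * sideband enabled, iWARP disabled and 8 VMDq VSIs of 2 queue pairs each:
 *
 *   vectors_left = 64
 *   misc vector:   v_budget = 1, vectors_left = 63
 *   LAN:           num_lan_msix = min(16, 63 / 2) = 16, vectors_left = 47
 *   FD sideband:   num_fdsb_msix = 1, v_budget = 2, vectors_left = 46
 *   VMDq:          vmdq_vecs = min(46, 8 * 2) = 16, v_budget = 18,
 *                  vectors_left = 30
 *   extra LAN:     min(16 - 16, 30) = 0, so num_lan_msix stays at 16
 *   final request: v_budget = 18 + 16 = 34 vectors
 */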
/**
 * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
 * @vsi: the VSI being configured
 * @v_idx: index of the vector in the vsi struct
 * @cpu: cpu to be used on affinity_mask
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
	struct i40e_q_vector *q_vector;

	/* allocate q_vector */
	q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	q_vector->vsi = vsi;
	q_vector->v_idx = v_idx;
	cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi,
			       i40e_napi_poll, NAPI_POLL_WEIGHT);

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;

	return 0;
}
/**
 * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
 * @vsi: the VSI being configured
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err, v_idx, num_q_vectors, current_cpu;

	/* if not MSIX, give the one vector only to the LAN VSI */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		num_q_vectors = vsi->num_q_vectors;
	else if (vsi == pf->vsi[pf->lan_vsi])
		num_q_vectors = 1;
	else
		return -EINVAL;

	current_cpu = cpumask_first(cpu_online_mask);

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
		if (err)
			goto err_out;
		current_cpu = cpumask_next(current_cpu, cpu_online_mask);
		if (unlikely(current_cpu >= nr_cpu_ids))
			current_cpu = cpumask_first(cpu_online_mask);
	}

	return 0;

err_out:
	while (v_idx--)
		i40e_free_q_vector(vsi, v_idx);

	return err;
}
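/* The loop above spreads the suggested vector affinity round-robin across
 * online CPUs: with 4 online CPUs and 8 q_vectors the suggested CPUs are
 * 0, 1, 2, 3, 0, 1, 2, 3, since cpumask_next() wraps back to
 * cpumask_first() once nr_cpu_ids is reached.
 */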
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
				       I40E_FLAG_IWARP_ENABLED	|
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_DCB_ENABLED	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED	|
				       I40E_FLAG_VMDQ_ENABLED);
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile)
		return -ENOMEM;

	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
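/* Interrupt scheme selection thus degrades gracefully:
 *   MSI-X (one vector per queue pair plus misc and optional features)
 *     -> MSI (a single vector, queue-related features disabled)
 *       -> legacy INTx (a single shared vector),
 * with i40e_determine_queue_usage() reworking the queue counts whenever
 * MSI-X is lost.
 */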
/**
 * i40e_restore_interrupt_scheme - Restore the interrupt scheme
 * @pf: private board data structure
 *
 * Restore the interrupt scheme that was cleared when we suspended the
 * device. This should be called during resume to re-allocate the q_vectors
 * and reacquire IRQs.
 **/
static int i40e_restore_interrupt_scheme(struct i40e_pf *pf)
{
	int err, i;

	/* We cleared the MSI and MSI-X flags when disabling the old interrupt
	 * scheme. We need to re-enable them here in order to attempt to
	 * re-acquire the MSI or MSI-X vectors
	 */
	pf->flags |= (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);

	err = i40e_init_interrupt_scheme(pf);
	if (err)
		return err;

	/* Now that we've re-acquired IRQs, we need to remap the vectors and
	 * rings together again.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			err = i40e_vsi_alloc_q_vectors(pf->vsi[i]);
			if (err)
				goto err_unwind;
			i40e_vsi_map_rings_to_vectors(pf->vsi[i]);
		}
	}

	err = i40e_setup_misc_vector(pf);
	if (err)
		goto err_unwind;

	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		i40e_client_update_msix_info(pf);

	return 0;

err_unwind:
	while (i--) {
		if (pf->vsi[i])
			i40e_vsi_free_q_vectors(pf->vsi[i]);
	}

	return err;
}
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non-queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors. This is not used
 * when in MSI or Legacy interrupt mode.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the IRQ once, the first time through. */
	if (!test_and_set_bit(__I40E_MISC_IRQ_REQUESTED, pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			clear_bit(__I40E_MISC_IRQ_REQUESTED, pf->state);
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
/**
 * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
 * @vsi: Pointer to vsi structure
 * @seed: Buffer to store the hash keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Return 0 on success, negative on failure
 **/
static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
			   u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int ret = 0;

	if (seed) {
		ret = i40e_aq_get_rss_key(hw, vsi->id,
			(struct i40e_aqc_get_set_rss_key_data *)seed);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS key, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	if (lut) {
		bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;

		ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Cannot get RSS lut, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return ret;
		}
	}

	return ret;
}
/**
 * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
 * @vsi: Pointer to vsi structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
			       const u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	/* Fill out hash function seed */
	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		if (vsi->type == I40E_VSI_MAIN) {
			for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HKEY1(i, vf_id), seed_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
		}
	}

	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (vsi->type == I40E_VSI_MAIN) {
			if (lut_size != I40E_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
		} else if (vsi->type == I40E_VSI_SRIOV) {
			if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
				return -EINVAL;
			for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
				wr32(hw, I40E_VFQF_HLUT1(i, vf_id), lut_dw[i]);
		} else {
			dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
		}
	}
	i40e_flush(hw);

	return 0;
}
/**
 * i40e_get_rss_reg - Get the RSS keys and lut by reading registers
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
			    u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 i;

	if (seed) {
		u32 *seed_dw = (u32 *)seed;

		for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
			seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
	}
	if (lut) {
		u32 *lut_dw = (u32 *)lut;

		if (lut_size != I40E_HLUT_ARRAY_SIZE)
			return -EINVAL;
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			lut_dw[i] = rd32(hw, I40E_PFQF_HLUT(i));
	}

	return 0;
}
/**
 * i40e_config_rss - Configure RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: RSS hash seed
 * @lut: Lookup table
 * @lut_size: Lookup table size
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_config_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_config_rss_reg(vsi, seed, lut, lut_size);
}

/**
 * i40e_get_rss - Get RSS keys and lut
 * @vsi: Pointer to VSI structure
 * @seed: Buffer to store the keys
 * @lut: Buffer to store the lookup table entries
 * @lut_size: Size of buffer to store the lookup table entries
 *
 * Returns 0 on success, negative on failure
 **/
int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
{
	struct i40e_pf *pf = vsi->back;

	if (pf->hw_features & I40E_HW_RSS_AQ_CAPABLE)
		return i40e_get_rss_aq(vsi, seed, lut, lut_size);
	else
		return i40e_get_rss_reg(vsi, seed, lut, lut_size);
}
/**
 * i40e_fill_rss_lut - Fill the RSS lookup table with default values
 * @pf: Pointer to board private structure
 * @lut: Lookup table
 * @rss_table_size: Lookup table size
 * @rss_size: Range of queue number for hashing
 **/
void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
		       u16 rss_table_size, u16 rss_size)
{
	u16 i;

	for (i = 0; i < rss_table_size; i++)
		lut[i] = i % rss_size;
}
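/* Example: with rss_table_size = 128 and rss_size = 4 the default LUT
 * becomes 0, 1, 2, 3, 0, 1, 2, 3, ... so hash buckets are striped evenly
 * across the four enabled queues.
 */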
/**
 * i40e_pf_config_rss - Prepare for RSS if used
 * @pf: board private structure
 **/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 seed[I40E_HKEY_ARRAY_SIZE];
	u8 *lut;
	struct i40e_hw *hw = &pf->hw;
	u32 reg_val;
	u64 hena;
	int ret;

	/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
	hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
		((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
	hena |= i40e_pf_get_default_rss_hena(pf);

	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

	/* Determine the RSS table size based on the hardware capabilities */
	reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
	reg_val = (pf->rss_table_size == 512) ?
			(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
			(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
	i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);

	/* Determine the RSS size of the VSI */
	if (!vsi->rss_size) {
		u16 qcount;
		/* If the firmware does something weird during VSI init, we
		 * could end up with zero TCs. Check for that to avoid
		 * divide-by-zero. It probably won't pass traffic, but it also
		 * won't panic.
		 */
		qcount = vsi->num_queue_pairs /
			 (vsi->tc_config.numtc ? vsi->tc_config.numtc : 1);
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);
	}
	if (!vsi->rss_size)
		return -EINVAL;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	/* Use user configured lut if there is one, otherwise use default */
	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

	/* Use user configured hash key if there is one, otherwise
	 * use default.
	 */
	if (vsi->rss_hkey_user)
		memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
	else
		netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
	ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
	kfree(lut);

	return ret;
}
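/* Example of the rss_size derivation above: a VSI with 16 queue pairs
 * split across 4 traffic classes has qcount = 16 / 4 = 4, so at most 4
 * queues per TC participate in RSS regardless of pf->alloc_rss_size.
 */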
/**
 * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
 * @pf: board private structure
 * @queue_count: the requested queue count for rss.
 *
 * returns 0 if rss is not enabled, if enabled returns the final rss queue
 * count which may be different from the requested queue count.
 * Note: expects to be called while under rtnl_lock()
 **/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	int new_rss_size;

	if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
		return 0;

	new_rss_size = min_t(int, queue_count, pf->rss_size_max);

	if (queue_count != vsi->num_queue_pairs) {
		u16 qcount;

		vsi->req_queue_pairs = queue_count;
		i40e_prep_for_reset(pf, true);

		pf->alloc_rss_size = new_rss_size;

		i40e_reset_and_rebuild(pf, true, true);

		/* Discard the user configured hash keys and lut, if less
		 * queues are enabled.
		 */
		if (queue_count < vsi->rss_size) {
			i40e_clear_rss_config_user(vsi);
			dev_dbg(&pf->pdev->dev,
				"discard user configured hash keys and lut\n");
		}

		/* Reset vsi->rss_size, as number of enabled queues changed */
		qcount = vsi->num_queue_pairs / vsi->tc_config.numtc;
		vsi->rss_size = min_t(int, pf->alloc_rss_size, qcount);

		i40e_pf_config_rss(pf);
	}
	dev_info(&pf->pdev->dev, "User requested queue count/HW max RSS count: %d/%d\n",
		 vsi->req_queue_pairs, pf->rss_size_max);
	return pf->alloc_rss_size;
}
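/* This is the backend for the driver's ethtool channel-count path (e.g.
 * "ethtool -L <if> combined 8"), which is why it expects rtnl_lock() to
 * be held: the requested count is clamped to pf->rss_size_max and a full
 * PF reset/rebuild is triggered whenever it differs from the current
 * queue count.
 */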
/**
 * i40e_get_partition_bw_setting - Retrieve BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf)
{
	i40e_status status;
	bool min_valid, max_valid;
	u32 max_bw, min_bw;

	status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
					   &min_valid, &max_valid);

	if (!status) {
		if (min_valid)
			pf->min_bw = min_bw;
		if (max_valid)
			pf->max_bw = max_bw;
	}

	return status;
}

/**
 * i40e_set_partition_bw_setting - Set BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
{
	struct i40e_aqc_configure_partition_bw_data bw_data;
	i40e_status status;

	/* Set the valid bit for this PF */
	bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
	bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
	bw_data.min_bw[pf->hw.pf_id] = pf->min_bw & I40E_ALT_BW_VALUE_MASK;

	/* Set the new bandwidths */
	status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);

	return status;
}
/**
 * i40e_commit_partition_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 **/
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, 0, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_MSIX_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->alloc_rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);
	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->alloc_rss_size = min_t(int, pf->rss_size_max,
					   num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_partition_bw_setting(pf)) {
			dev_warn(&pf->pdev->dev,
				 "Could not get partition bw settings\n");
		} else {
			dev_info(&pf->pdev->dev,
				 "Partition BW Min = %8.8x, Max = %8.8x\n",
				 pf->min_bw, pf->max_bw);

			/* nudge the Tx scheduler */
			i40e_set_partition_bw_setting(pf);
		}
	}

	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->hw_features |= (I40E_HW_RSS_AQ_CAPABLE |
				    I40E_HW_128_QP_RSS_CAPABLE |
				    I40E_HW_ATR_EVICT_CAPABLE |
				    I40E_HW_WB_ON_ITR_CAPABLE |
				    I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE |
				    I40E_HW_NO_PCI_LINK_CHECK |
				    I40E_HW_USE_SET_LLDP_MIB |
				    I40E_HW_GENEVE_OFFLOAD_CAPABLE |
				    I40E_HW_PTP_L4_CAPABLE |
				    I40E_HW_WOL_MC_MAGIC_PKT_WAKE |
				    I40E_HW_OUTER_UDP_CSUM_CAPABLE);

#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
		if (rd32(&pf->hw, I40E_GLQF_FDEVICTENA(1)) !=
		    I40E_FDEVICT_PCTYPE_DEFAULT) {
			dev_warn(&pf->pdev->dev,
				 "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
			pf->hw_features &= ~I40E_HW_ATR_EVICT_CAPABLE;
		}
	} else if ((pf->hw.aq.api_maj_ver > 1) ||
		   ((pf->hw.aq.api_maj_ver == 1) &&
		    (pf->hw.aq.api_min_ver > 4))) {
		/* Supported in FW API version higher than 1.4 */
		pf->hw_features |= I40E_HW_GENEVE_OFFLOAD_CAPABLE;
	}

	/* Enable HW ATR eviction if possible */
	if (pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE)
		pf->flags |= I40E_FLAG_HW_ATR_EVICT_ENABLED;

	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4))) {
		pf->hw_features |= I40E_HW_RESTART_AUTONEG;
		/* No DCB support for FW < v4.33 */
		pf->hw_features |= I40E_HW_NO_DCB_SUPPORT;
	}

	/* Disable FW LLDP if FW < v4.3 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)))
		pf->hw_features |= I40E_HW_STOP_FW_LLDP;

	/* Use the FW Set LLDP MIB API if FW > v4.40 */
	if ((pf->hw.mac.type == I40E_MAC_XL710) &&
	    (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
	    (pf->hw.aq.fw_maj_ver >= 5)))
		pf->hw_features |= I40E_HW_USE_SET_LLDP_MIB;

	/* Enable PTP L4 if FW > v6.0 */
	if (pf->hw.mac.type == I40E_MAC_XL710 &&
	    pf->hw.aq.fw_maj_ver >= 6)
		pf->hw_features |= I40E_HW_PTP_L4_CAPABLE;

	if (pf->hw.func_caps.vmdq && num_online_cpus() != 1) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

	if (pf->hw.func_caps.iwarp && num_online_cpus() != 1) {
		pf->flags |= I40E_FLAG_IWARP_ENABLED;
		/* IWARP needs one extra vector for CQP just like MISC.*/
		pf->num_iwarp_msix = (int)num_online_cpus() + 1;
	}
	/* Stopping the FW LLDP engine is only supported on the
	 * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP
	 * engine is not supported if NPAR is functioning on this
	 * part
	 */
	if (pf->hw.mac.type == I40E_MAC_XL710 &&
	    !pf->hw.func_caps.npar_enable &&
	    (pf->hw.aq.api_maj_ver > 1 ||
	     (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
		pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;

#ifdef CONFIG_PCI_IOV
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

sw_init_done:
	return err;
}
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled. If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		/* enable FD_SB only if there is MSI-X vector and no cloud
		 * filters exist
		 */
		if (pf->num_fdsb_msix > 0 && !pf->num_cloud_filters) {
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
			pf->flags &= ~I40E_FLAG_FD_SB_INACTIVE;
		}
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		clear_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;

		/* reset fd counters */
		pf->fd_add_err = 0;
		pf->fd_atr_cnt = 0;
		/* if ATR was auto disabled it can be re-enabled. */
		if (test_and_clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state))
			if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
			    (I40E_DEBUG_FD & pf->hw.debug_mask))
				dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
	}
	return need_reset;
}
/**
 * i40e_clear_rss_lut - clear the rx hash lookup table
 * @vsi: the VSI being configured
 **/
static void i40e_clear_rss_lut(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vf_id = vsi->vf_id;
	u8 i;

	if (vsi->type == I40E_VSI_MAIN) {
		for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
			wr32(hw, I40E_PFQF_HLUT(i), 0);
	} else if (vsi->type == I40E_VSI_SRIOV) {
		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
			i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf_id), 0);
	} else {
		dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
	}
}
/**
 * i40e_set_features - set the netdev feature flags
 * @netdev: ptr to the netdev being adjusted
 * @features: the feature set that the stack is suggesting
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_set_features(struct net_device *netdev,
			     netdev_features_t features)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	bool need_reset;

	if (features & NETIF_F_RXHASH && !(netdev->features & NETIF_F_RXHASH))
		i40e_pf_config_rss(pf);
	else if (!(features & NETIF_F_RXHASH) &&
		 netdev->features & NETIF_F_RXHASH)
		i40e_clear_rss_lut(vsi);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		i40e_vlan_stripping_enable(vsi);
	else
		i40e_vlan_stripping_disable(vsi);

	if (!(features & NETIF_F_HW_TC) && pf->num_cloud_filters) {
		dev_err(&pf->pdev->dev,
			"Offloaded tc filters active, can't turn hw_tc_offload off");
		return -EINVAL;
	}

	need_reset = i40e_set_ntuple(pf, features);

	if (need_reset)
		i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);

	return 0;
}
/**
 * i40e_get_udp_port_idx - Look up a possibly offloaded Rx UDP port
 * @pf: board private structure
 * @port: The UDP port to look up
 *
 * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
 **/
static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
{
	u8 i;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		/* Do not report ports with pending deletions as
		 * being available.
		 */
		if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
			continue;
		if (pf->udp_ports[i].port == port)
			return i;
	}

	return i;
}
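/* Note the sentinel convention: a return value of
 * I40E_MAX_PF_UDP_OFFLOAD_PORTS means "not found". Passing port == 0
 * doubles as a search for a free slot, since deleted entries are zeroed;
 * that is how i40e_udp_tunnel_add() below finds space for a new port.
 */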
/**
 * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
static void i40e_udp_tunnel_add(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	u8 next_idx;
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "port %d already offloaded\n", port);
		return;
	}

	/* Now check if there is space to add the new port */
	next_idx = i40e_get_udp_port_idx(pf, 0);

	if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
		netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
			    port);
		return;
	}

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
			return;
		pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
		break;
	default:
		return;
	}

	/* New port: add it and mark its index in the bitmap */
	pf->udp_ports[next_idx].port = port;
	pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
	pf->pending_udp_bitmap |= BIT_ULL(next_idx);
	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
}
/**
 * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
 * @netdev: This physical port's netdev
 * @ti: Tunnel endpoint information
 **/
static void i40e_udp_tunnel_del(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	u16 port = ntohs(ti->port);
	u8 idx;

	idx = i40e_get_udp_port_idx(pf, port);

	/* Check if port already exists */
	if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
		goto not_found;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
			goto not_found;
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
			goto not_found;
		break;
	default:
		goto not_found;
	}

	/* if port exists, set it to 0 (mark for deletion)
	 * and make it pending
	 */
	pf->udp_ports[idx].port = 0;

	/* Toggle pending bit instead of setting it. This way if we are
	 * deleting a port that has yet to be added we just clear the pending
	 * bit and don't have to worry about it.
	 */
	pf->pending_udp_bitmap ^= BIT_ULL(idx);
	set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);

	return;
not_found:
	netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
		    port);
}
static int i40e_get_phys_port_id(struct net_device *netdev,
				 struct netdev_phys_item_id *ppid)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;

	if (!(pf->hw_features & I40E_HW_PORT_ID_VALID))
		return -EOPNOTSUPP;

	ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
	memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);

	return 0;
}
/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN ID
 * @flags: instructions from stack about fdb operation
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
/**
 * i40e_ndo_bridge_setlink - Set the hardware bridge mode
 * @dev: the netdev being configured
 * @nlh: RTNL message
 * @flags: bridge flags
 *
 * Inserts a new hardware bridge if not already created and
 * enables the bridging mode requested (VEB or VEPA). If the
 * hardware bridge has already been inserted and the request
 * is to change the mode then that requires a PF reset to
 * allow rebuild of the components with required hardware
 * bridge mode enabled.
 *
 * Note: expects to be called while under rtnl_lock()
 **/
static int i40e_ndo_bridge_setlink(struct net_device *dev,
				   struct nlmsghdr *nlh,
				   u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	struct nlattr *attr, *br_spec;
	int i, rem;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);

	nla_for_each_nested(attr, br_spec, rem) {
		__u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		mode = nla_get_u16(attr);
		if ((mode != BRIDGE_MODE_VEPA) &&
		    (mode != BRIDGE_MODE_VEB))
			return -EINVAL;

		/* Insert a new HW bridge */
		if (!veb) {
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
			if (veb) {
				veb->bridge_mode = mode;
				i40e_config_bridge_mode(veb);
			} else {
				/* No Bridge HW offload available */
				return -ENOENT;
			}
			break;
		} else if (mode != veb->bridge_mode) {
			/* Existing HW bridge but different mode needs reset */
			veb->bridge_mode = mode;
			/* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
			if (mode == BRIDGE_MODE_VEB)
				pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
			else
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
			break;
		}
	}

	return 0;
}
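/* From userspace this is typically exercised with iproute2, e.g.
 * "bridge link set dev <pf-netdev> hwmode veb" (or "vepa"); note that
 * switching an existing bridge between the two modes costs a full PF
 * reset, as above.
 */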
/**
 * i40e_ndo_bridge_getlink - Get the hardware bridge mode
 * @skb: skb buff
 * @pid: process ID
 * @seq: RTNL message seq #
 * @dev: the netdev being configured
 * @filter_mask: unused
 * @nlflags: netlink flags passed in
 *
 * Return the mode in which the hardware bridge is operating in
 * (VEB or VEPA).
 **/
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
				   struct net_device *dev,
				   u32 __always_unused filter_mask,
				   int nlflags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_veb *veb = NULL;
	int i;

	/* Only for PF VSI for now */
	if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
		return -EOPNOTSUPP;

	/* Find the HW bridge for the PF VSI */
	for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
			veb = pf->veb[i];
	}

	if (!veb)
		return 0;

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
				       0, 0, nlflags, filter_mask, NULL);
}
/**
 * i40e_features_check - Validate encapsulated packet conforms to limits
 * @skb: skb buff
 * @dev: This physical port's netdev
 * @features: Offload features that the stack believes apply
 **/
static netdev_features_t i40e_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	size_t len;

	/* No point in doing any of this if neither checksum nor GSO are
	 * being requested for this frame. We can rule out both by just
	 * checking for CHECKSUM_PARTIAL
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return features;

	/* We cannot support GSO if the MSS is going to be less than
	 * 64 bytes. If it is then we need to drop support for GSO.
	 */
	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
		features &= ~NETIF_F_GSO_MASK;

	/* MACLEN can support at most 63 words */
	len = skb_network_header(skb) - skb->data;
	if (len & ~(63 * 2))
		goto out_err;

	/* IPLEN and EIPLEN can support at most 127 dwords */
	len = skb_transport_header(skb) - skb_network_header(skb);
	if (len & ~(127 * 4))
		goto out_err;

	if (skb->encapsulation) {
		/* L4TUNLEN can support 127 words */
		len = skb_inner_network_header(skb) - skb_transport_header(skb);
		if (len & ~(127 * 2))
			goto out_err;

		/* IPLEN can support at most 127 dwords */
		len = skb_inner_transport_header(skb) -
		      skb_inner_network_header(skb);
		if (len & ~(127 * 4))
			goto out_err;
	}

	/* No need to validate L4LEN as TCP is the only protocol with a
	 * flexible value and we support all possible values supported
	 * by TCP, which is at most 15 dwords
	 */

	return features;
out_err:
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
/**
 * i40e_xdp_setup - add/remove an XDP program
 * @vsi: VSI to changed
 * @prog: XDP program
 **/
static int i40e_xdp_setup(struct i40e_vsi *vsi,
			  struct bpf_prog *prog)
{
	int frame_size = vsi->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	struct i40e_pf *pf = vsi->back;
	struct bpf_prog *old_prog;
	bool need_reset;
	int i;

	/* Don't allow frames that span over multiple buffers */
	if (frame_size > vsi->rx_buf_len)
		return -EINVAL;

	if (!i40e_enabled_xdp_vsi(vsi) && !prog)
		return 0;

	/* When turning XDP on->off/off->on we reset and rebuild the rings. */
	need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);

	if (need_reset)
		i40e_prep_for_reset(pf, true);

	old_prog = xchg(&vsi->xdp_prog, prog);

	if (need_reset)
		i40e_reset_and_rebuild(pf, true, true);

	for (i = 0; i < vsi->num_queue_pairs; i++)
		WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

	if (old_prog)
		bpf_prog_put(old_prog);

	return 0;
}
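/* Note the asymmetry above: swapping one XDP program for another is done
 * with a bare xchg() and no reset, while transitions between "no program"
 * and "some program" rebuild the rings, since the dedicated XDP Tx queue
 * set only exists while a program is attached.
 */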
/**
 * i40e_enter_busy_conf - Enters busy config state
 * @vsi: vsi
 *
 * Returns 0 on success, <0 for failure.
 **/
static int i40e_enter_busy_conf(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int timeout = 50;

	while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	return 0;
}

/**
 * i40e_exit_busy_conf - Exits busy config state
 * @vsi: vsi
 **/
static void i40e_exit_busy_conf(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	clear_bit(__I40E_CONFIG_BUSY, pf->state);
}

/**
 * i40e_queue_pair_reset_stats - Resets all statistics for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 **/
static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair)
{
	memset(&vsi->rx_rings[queue_pair]->rx_stats, 0,
	       sizeof(vsi->rx_rings[queue_pair]->rx_stats));
	memset(&vsi->tx_rings[queue_pair]->stats, 0,
	       sizeof(vsi->tx_rings[queue_pair]->stats));
	if (i40e_enabled_xdp_vsi(vsi)) {
		memset(&vsi->xdp_rings[queue_pair]->stats, 0,
		       sizeof(vsi->xdp_rings[queue_pair]->stats));
	}
}
/**
 * i40e_queue_pair_clean_rings - Cleans all the rings of a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 **/
static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair)
{
	i40e_clean_tx_ring(vsi->tx_rings[queue_pair]);
	if (i40e_enabled_xdp_vsi(vsi))
		i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]);
	i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
}

/**
 * i40e_queue_pair_toggle_napi - Enables/disables NAPI for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 * @enable: true for enable, false for disable
 **/
static void i40e_queue_pair_toggle_napi(struct i40e_vsi *vsi, int queue_pair,
					bool enable)
{
	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
	struct i40e_q_vector *q_vector = rxr->q_vector;

	if (!vsi->netdev)
		return;

	/* All rings in a qp belong to the same qvector. */
	if (q_vector->rx.ring || q_vector->tx.ring) {
		if (enable)
			napi_enable(&q_vector->napi);
		else
			napi_disable(&q_vector->napi);
	}
}
/**
 * i40e_queue_pair_toggle_rings - Enables/disables all rings for a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 * @enable: true for enable, false for disable
 *
 * Returns 0 on success, <0 on failure.
 **/
static int i40e_queue_pair_toggle_rings(struct i40e_vsi *vsi, int queue_pair,
					bool enable)
{
	struct i40e_pf *pf = vsi->back;
	int pf_q, ret = 0;

	pf_q = vsi->base_queue + queue_pair;
	ret = i40e_control_wait_tx_q(vsi->seid, pf, pf_q,
				     false /*is xdp*/, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d Tx ring %d %sable timeout\n",
			 vsi->seid, pf_q, (enable ? "en" : "dis"));
		return ret;
	}

	i40e_control_rx_q(pf, pf_q, enable);
	ret = i40e_pf_rxq_wait(pf, pf_q, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d Rx ring %d %sable timeout\n",
			 vsi->seid, pf_q, (enable ? "en" : "dis"));
		return ret;
	}

	/* Due to HW errata, on Rx disable only, the register can
	 * indicate done before it really is. Needs 50ms to be sure
	 */
	if (!enable)
		mdelay(50);

	if (!i40e_enabled_xdp_vsi(vsi))
		return ret;

	ret = i40e_control_wait_tx_q(vsi->seid, pf,
				     pf_q + vsi->alloc_queue_pairs,
				     true /*is xdp*/, enable);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "VSI seid %d XDP Tx ring %d %sable timeout\n",
			 vsi->seid, pf_q, (enable ? "en" : "dis"));
	}

	return ret;
}
/**
 * i40e_queue_pair_enable_irq - Enables interrupts for a queue pair
 * @vsi: vsi
 * @queue_pair: queue_pair
 **/
static void i40e_queue_pair_enable_irq(struct i40e_vsi *vsi, int queue_pair)
{
	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	/* All rings in a qp belong to the same qvector. */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, rxr->q_vector->v_idx);
	else
		i40e_irq_dynamic_enable_icr0(pf);

	i40e_flush(hw);
}

/**
 * i40e_queue_pair_disable_irq - Disables interrupts for a queue pair
 * @vsi: vsi
 * @queue_pair: queue_pair
 **/
static void i40e_queue_pair_disable_irq(struct i40e_vsi *vsi, int queue_pair)
{
	struct i40e_ring *rxr = vsi->rx_rings[queue_pair];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;

	/* For simplicity, instead of removing the qp interrupt causes
	 * from the interrupt linked list, we simply disable the interrupt, and
	 * leave the list intact.
	 *
	 * All rings in a qp belong to the same qvector.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		u32 intpf = vsi->base_vector + rxr->q_vector->v_idx;

		wr32(hw, I40E_PFINT_DYN_CTLN(intpf - 1), 0);
		i40e_flush(hw);
		synchronize_irq(pf->msix_entries[intpf].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
/**
 * i40e_queue_pair_disable - Disables a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 *
 * Returns 0 on success, <0 on failure.
 **/
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
{
	int err;

	err = i40e_enter_busy_conf(vsi);
	if (err)
		return err;

	i40e_queue_pair_disable_irq(vsi, queue_pair);
	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
	i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
	i40e_queue_pair_clean_rings(vsi, queue_pair);
	i40e_queue_pair_reset_stats(vsi, queue_pair);

	return err;
}

/**
 * i40e_queue_pair_enable - Enables a queue pair
 * @vsi: vsi
 * @queue_pair: queue pair
 *
 * Returns 0 on success, <0 on failure.
 **/
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair)
{
	int err;

	err = i40e_configure_tx_ring(vsi->tx_rings[queue_pair]);
	if (err)
		return err;

	if (i40e_enabled_xdp_vsi(vsi)) {
		err = i40e_configure_tx_ring(vsi->xdp_rings[queue_pair]);
		if (err)
			return err;
	}

	err = i40e_configure_rx_ring(vsi->rx_rings[queue_pair]);
	if (err)
		return err;

	err = i40e_queue_pair_toggle_rings(vsi, queue_pair, true /* on */);
	i40e_queue_pair_toggle_napi(vsi, queue_pair, true /* on */);
	i40e_queue_pair_enable_irq(vsi, queue_pair);

	i40e_exit_busy_conf(vsi);

	return err;
}
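/* i40e_queue_pair_disable()/i40e_queue_pair_enable() are mirror images,
 * and note that __I40E_CONFIG_BUSY is taken in the disable path but only
 * released at the end of the enable path, so the pair brackets a whole
 * reconfiguration (e.g. an AF_XDP UMEM setup) as one critical section.
 * Disable also masks the IRQ first and cleans rings last, while enable
 * unmasks the IRQ only once the rings are reconfigured and running.
 */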
/**
 * i40e_xdp - implements ndo_bpf for i40e
 * @dev: netdevice
 * @xdp: XDP command
 **/
static int i40e_xdp(struct net_device *dev,
		    struct netdev_bpf *xdp)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return i40e_xdp_setup(vsi, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = vsi->xdp_prog ? vsi->xdp_prog->aux->id : 0;
		return 0;
	case XDP_QUERY_XSK_UMEM:
		return i40e_xsk_umem_query(vsi, &xdp->xsk.umem,
					   xdp->xsk.queue_id);
	case XDP_SETUP_XSK_UMEM:
		return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
					   xdp->xsk.queue_id);
	default:
		return -EINVAL;
	}
}
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= __i40e_setup_tc,
	.ndo_set_features	= i40e_set_features,
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= i40e_ndo_set_vf_trust,
	.ndo_udp_tunnel_add	= i40e_udp_tunnel_add,
	.ndo_udp_tunnel_del	= i40e_udp_tunnel_del,
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
	.ndo_bpf		= i40e_xdp,
	.ndo_xdp_xmit		= i40e_xdp_xmit,
	.ndo_xsk_async_xmit	= i40e_xsk_async_xmit,
};
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 broadcast[ETH_ALEN];
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;
	netdev_features_t hw_enc_features;
	netdev_features_t hw_features;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	hw_enc_features = NETIF_F_SG			|
			  NETIF_F_IP_CSUM		|
			  NETIF_F_IPV6_CSUM		|
			  NETIF_F_HIGHDMA		|
			  NETIF_F_SOFT_FEATURES		|
			  NETIF_F_TSO			|
			  NETIF_F_TSO_ECN		|
			  NETIF_F_TSO6			|
			  NETIF_F_GSO_GRE		|
			  NETIF_F_GSO_GRE_CSUM		|
			  NETIF_F_GSO_PARTIAL		|
			  NETIF_F_GSO_IPXIP4		|
			  NETIF_F_GSO_IPXIP6		|
			  NETIF_F_GSO_UDP_TUNNEL	|
			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
			  NETIF_F_SCTP_CRC		|
			  NETIF_F_RXHASH		|
			  NETIF_F_RXCSUM		|
			  0;

	if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
		netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->hw_enc_features |= hw_enc_features;

	/* record features VLANs can make use of */
	netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;

	hw_features = hw_enc_features		|
		      NETIF_F_HW_VLAN_CTAG_TX	|
		      NETIF_F_HW_VLAN_CTAG_RX;

	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		hw_features |= NETIF_F_NTUPLE | NETIF_F_HW_TC;

	netdev->hw_features |= hw_features;

	netdev->features |= hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary for two reasons. First,
		 * some older NVM configurations load a default MAC-VLAN
		 * filter that will accept any tagged packet, and we want to
		 * replace this with a normal filter. Additionally, it is
		 * possible our MAC address was provided by the platform using
		 * Open Firmware or similar.
		 *
		 * Thus, we need to remove the default filter and install one
		 * specific to the MAC address.
		 */
		i40e_rm_default_mac_filter(vsi, mac_addr);
		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	} else {
		/* Relate the VSI_VMDQ name to the VSI_MAIN name. Note that we
		 * are still limited by IFNAMSIZ, but we're adding 'v%d\0' to
		 * the end, which is 4 bytes long, so force truncation of the
		 * original name by IFNAMSIZ - 4
		 */
		snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
			 IFNAMSIZ - 4,
			 pf->vsi[pf->lan_vsi]->netdev->name);
		eth_random_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_hash_lock);
		i40e_add_mac_filter(vsi, mac_addr);
		spin_unlock_bh(&vsi->mac_filter_hash_lock);
	}

	/* Add the broadcast filter so that we initially will receive
	 * broadcast packets. Note that when a new VLAN is first added the
	 * driver will convert all filters marked I40E_VLAN_ANY into VLAN
	 * specific filters as part of transitioning into "vlan" operation.
	 * When more VLANs are added, the driver will copy each existing MAC
	 * filter and add it for the new VLAN.
	 *
	 * Broadcast filters are handled specially by
	 * i40e_sync_filters_subtask, as the driver must set the broadcast
	 * promiscuous bit instead of adding this directly as a MAC/VLAN
	 * filter. The subtask will update the correct broadcast promiscuous
	 * bits as VLANs become active or inactive.
	 */
	eth_broadcast_addr(broadcast);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	i40e_add_mac_filter(vsi, broadcast);
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);

	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);

	/* MTU range: 68 - 9706 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = I40E_MAX_RXBUFFER - I40E_PACKET_HDR_PAD;

	return 0;
}
/**
 * i40e_vsi_delete - Delete a VSI from the switch
 * @vsi: the VSI being removed
 **/
static void i40e_vsi_delete(struct i40e_vsi *vsi)
{
	/* remove default VSI is not allowed */
	if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
		return;

	i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
}
/**
 * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
 * @vsi: the VSI being queried
 *
 * Returns 1 if the HW bridge mode is VEB and 0 in case of VEPA mode
 **/
int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
{
	struct i40e_veb *veb;
	struct i40e_pf *pf = vsi->back;

	/* Uplink is not a bridge so default to VEB */
	if (vsi->veb_idx == I40E_NO_VEB)
		return 1;

	veb = pf->veb[vsi->veb_idx];
	if (!veb) {
		dev_info(&pf->pdev->dev,
			 "There is no veb associated with the bridge\n");
		return -ENOENT;
	}

	/* Uplink is a bridge in VEPA mode */
	if (veb->bridge_mode & BRIDGE_MODE_VEPA)
		return 0;

	/* Uplink is a bridge in VEB mode */
	return 1;
}
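/* Illustrative note: callers use the return value of
 * i40e_is_vsi_uplink_mode_veb() to decide whether a VSI context must carry
 * the ALLOW_LB switch flag, since local loopback only applies under a VEB.
 * This mirrors the pattern used in i40e_add_vsi() below:
 *
 *	if (i40e_is_vsi_uplink_mode_veb(vsi)) {
 *		ctxt.info.valid_sections |=
 *			cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
 *		ctxt.info.switch_id =
 *			cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 *	}
 */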
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	int bkt;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* Source pruning is enabled by default, so the flag is
		 * negative logic - if it's set, we need to fiddle with
		 * the VSI to disable source pruning.
		 */
		if (pf->flags & I40E_FLAG_SOURCE_PRUNING_DISABLED) {
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
		}

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				/* Single TC condition is not fatal,
				 * message and continue
				 */
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						     pf->hw.aq.asq_last_status));
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		if (vsi->back->flags & I40E_FLAG_IWARP_ENABLED) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
			ctxt.info.queueing_opt_flags |=
				(I40E_AQ_VSI_QUE_OPT_TCP_ENA |
				 I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
		}

		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_IWARP:
		/* send down message to iWARP */
		break;

	default:
		return -ENODEV;
	}

	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	vsi->active_filters = 0;
	clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
	spin_lock_bh(&vsi->mac_filter_hash_lock);
	/* If macvlan filters already exist, force them to get loaded */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) {
		f->state = I40E_FILTER_NEW;
		f_count++;
	}
	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		/* couldn't get VSI BW information */
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
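/* Note on the pattern above (sketch, not new driver logic): the firmware
 * only consumes the sections of ctxt.info whose bit is set in
 * valid_sections, so every update pairs a *_VALID bit with the fields it
 * covers, e.g.:
 *
 *	ctxt.info.valid_sections |=
 *		cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
 *	ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
 *
 * Zeroing valid_sections after caching ctxt.info into vsi->info ensures a
 * later update does not replay stale sections.
 */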
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f;
	struct hlist_node *h;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n, bkt;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	spin_lock_bh(&vsi->mac_filter_hash_lock);

	/* clear the sync flag on all filters */
	if (vsi->netdev) {
		__dev_uc_unsync(vsi->netdev, NULL);
		__dev_mc_unsync(vsi->netdev, NULL);
	}

	/* make sure any remaining filters are marked for deletion */
	hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
		__i40e_del_filter(vsi, f);

	spin_unlock_bh(&vsi->mac_filter_hash_lock);

	i40e_sync_vsi_filters(vsi);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet. We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	u16 alloc_queue_pairs;
	struct i40e_pf *pf;
	u8 enabled_tc;
	int ret;

	if (!vsi)
		return NULL;

	pf = vsi->back;

	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	if (vsi->type == I40E_VSI_MAIN)
		i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
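/* Worked example (illustrative): with an XDP program attached every queue
 * pair needs a shadow XDP Tx ring, so the qp_pile reservation doubles. For
 * a VSI with alloc_queue_pairs == 8:
 *
 *	u16 qp = 8 * (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);
 *	// qp == 16 with XDP enabled, 8 otherwise
 */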
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then adds a
 * VSI to the identified VEB.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	u16 alloc_queue_pairs;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous case
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {

		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	alloc_queue_pairs = vsi->alloc_queue_pairs *
			    (i40e_enabled_xdp_vsi(vsi) ? 2 : 1);

	ret = i40e_get_lump(pf, pf->qp_pile, alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->hw_features & I40E_HW_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
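/* Usage sketch (illustrative, assumes a PF whose main VSI already exists):
 * a VMDq VSI is typically hung off the main VSI's seid, letting
 * i40e_vsi_setup() create or reuse a VEB as described above:
 *
 *	struct i40e_vsi *vmdq_vsi;
 *
 *	vmdq_vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2,
 *				  pf->vsi[pf->lan_vsi]->seid, 0);
 *	if (!vmdq_vsi)
 *		return -ENOMEM;	// hypothetical caller's error path
 */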
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i * 4)) & 0x7);
	}

out:
	return ret;
}
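/* Worked example (illustrative): the firmware packs eight 4-bit "max
 * quanta" fields into two little-endian 16-bit words, which the code above
 * splices into one u32 before pulling 3 bits out of each 4-bit-aligned
 * field:
 *
 *	u32 tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
 *			(le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
 *	u8 tc3_quanta = (tc_bw_max >> (3 * 4)) & 0x7;	// TC3's field
 */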
/**
 * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
 * @pf: board private structure
 *
 * On error: returns error code (negative)
 * On success: returns veb index in PF (positive)
 **/
static int i40e_veb_mem_alloc(struct i40e_pf *pf)
{
	int ret = -ENOENT;
	struct i40e_veb *veb;
	int i;

	/* Need to protect the allocation of switch elements at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VEB list may be fragmented if VEB creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free slots in the list.
	 *
	 * find next empty veb slot, looping back around if necessary
	 */
	i = 0;
	while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
		i++;
	if (i >= I40E_MAX_VEB) {
		ret = -ENOMEM;
		goto err_alloc_veb;  /* out of VEB slots! */
	}

	veb = kzalloc(sizeof(*veb), GFP_KERNEL);
	if (!veb) {
		ret = -ENOMEM;
		goto err_alloc_veb;
	}
	veb->pf = pf;
	veb->idx = i;
	veb->enabled_tc = 1;

	pf->veb[i] = veb;
	ret = i;
err_alloc_veb:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
/**
 * i40e_veb_clear - remove veb struct
 * @veb: the veb to remove
 **/
static void i40e_veb_clear(struct i40e_veb *veb)
{
	if (!veb)
		return;

	if (veb->pf) {
		struct i40e_pf *pf = veb->pf;

		mutex_lock(&pf->switch_mutex);
		if (pf->veb[veb->idx] == veb)
			pf->veb[veb->idx] = NULL;
		mutex_unlock(&pf->switch_mutex);
	}

	kfree(veb);
}
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
	int ret;

	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, false,
			      &veb->seid, enable_stats, NULL);

	/* get a VEB from the hardware */
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx == pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
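/* Usage sketch (illustrative): the first VEB is typically created on top of
 * the MAC uplink with the main LAN VSI as its owner, inheriting that VSI's
 * TC map; this mirrors the call made from i40e_vsi_setup():
 *
 *	veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
 *			     vsi->tc_config.enabled_tc);
 *	if (!veb)
 *		return -ENOENT;	// hypothetical caller's error path
 */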
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
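/* Note (illustrative): i40e_aq_get_switch_config() returns at most one
 * I40E_AQ_LARGE_BUF page of elements per call and hands back a continuation
 * seid through its next_seid argument; the do/while above simply re-issues
 * the command until the firmware reports a zero continuation, e.g.:
 *
 *	u16 next = 0;
 *	do {
 *		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
 *						I40E_AQ_LARGE_BUF,
 *						&next, NULL);
 *	} while (!ret && next != 0);
 */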
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to be re-initialized.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	u16 flags = 0;
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* set the switch config bit for the whole device to
	 * support limited promisc or true promisc
	 * when user requests promisc. The default is limited
	 * promisc.
	 */

	if ((pf->hw.pf_id == 0) &&
	    !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
		flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		pf->last_sw_conf_flags = flags;
	}

	if (pf->hw.pf_id == 0) {
		u16 valid_flags;

		valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
		ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags, 0,
						NULL);
		if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
			dev_info(&pf->pdev->dev,
				 "couldn't set switch config bits, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			/* not a fatal problem, just keep going */
		}
		pf->last_sw_conf_valid_flags = valid_flags;
	}

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_cloud_filter_exit(pf);
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_pf_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	/* repopulate tunnel port filters */
	i40e_sync_udp_filters(pf);

	return ret;
}
/**
 * i40e_determine_queue_usage - Work out queue distribution
 * @pf: board private structure
 **/
static void i40e_determine_queue_usage(struct i40e_pf *pf)
{
	int queues_left;
	int q_max;

	pf->num_lan_qps = 0;

	/* Find the max queues to be put into basic use.  We'll always be
	 * using TC0, whether or not DCB is running, and TC0 will get the
	 * big port count if there is room for it.
	 */
	queues_left = pf->hw.func_caps.num_tx_qp;

	if ((queues_left == 1) ||
	    !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* one qp for PF, no queues for anything else */
		queues_left = 0;
		pf->alloc_rss_size = pf->num_lan_qps = 1;

		/* make sure all the fancies are disabled */
		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_CAPABLE	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_SRIOV_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
				  I40E_FLAG_FD_SB_ENABLED |
				  I40E_FLAG_FD_ATR_ENABLED |
				  I40E_FLAG_DCB_CAPABLE))) {
		/* one qp for PF */
		pf->alloc_rss_size = pf->num_lan_qps = 1;
		queues_left -= pf->num_lan_qps;

		pf->flags &= ~(I40E_FLAG_RSS_ENABLED	|
			       I40E_FLAG_IWARP_ENABLED	|
			       I40E_FLAG_FD_SB_ENABLED	|
			       I40E_FLAG_FD_ATR_ENABLED	|
			       I40E_FLAG_DCB_ENABLED	|
			       I40E_FLAG_VMDQ_ENABLED);
		pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
	} else {
		/* Not enough queues for all TCs */
		if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
		    (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
			pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
				       I40E_FLAG_DCB_ENABLED);
			dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
		}

		/* limit lan qps to the smaller of qps, cpus or msix */
		q_max = max_t(int, pf->rss_size_max, num_online_cpus());
		q_max = min_t(int, q_max, pf->hw.func_caps.num_tx_qp);
		q_max = min_t(int, q_max, pf->hw.func_caps.num_msix_vectors);
		pf->num_lan_qps = q_max;

		queues_left -= pf->num_lan_qps;
	}

	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (queues_left > 1) {
			queues_left -= 1; /* save 1 queue for FD */
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
			pf->flags |= I40E_FLAG_FD_SB_INACTIVE;
			dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
		}
	}

	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    pf->num_vf_qps && pf->num_req_vfs && queues_left) {
		pf->num_req_vfs = min_t(int, pf->num_req_vfs,
					(queues_left / pf->num_vf_qps));
		queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
	}

	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
		pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
					  (queues_left / pf->num_vmdq_qps));
		queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
	}

	pf->queues_left = queues_left;
	dev_dbg(&pf->pdev->dev,
		"qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
		pf->hw.func_caps.num_tx_qp,
		!!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
		pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
		pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
		queues_left);
}
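/* Worked example (purely illustrative numbers): on a function reporting 64
 * Tx queue pairs, 16 online CPUs, FD sideband on, 4 requested VFs at 4 qps
 * each and 8 VMDq VSIs at 2 qps each, the budget above unwinds as:
 *
 *	queues_left = 64
 *	num_lan_qps = min(max(rss_size_max, 16), 64, msix)  e.g. 16
 *	queues_left = 64 - 16 = 48, then -1 for FD SB -> 47
 *	VFs:  4 * 4 = 16 -> 31 remaining
 *	VMDq: 8 * 2 = 16 -> 15 left in pf->queues_left
 */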
/**
 * i40e_setup_pf_filter_control - Setup PF static filter control
 * @pf: PF to be setup
 *
 * i40e_setup_pf_filter_control sets up a PF's initial filter control
 * settings. If PE/FCoE are enabled then it will also set the per PF
 * based filter sizes required for them. It also enables Flow director,
 * ethertype and macvlan type filter settings for the pf.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
{
	struct i40e_filter_control_settings *settings = &pf->filter_settings;

	settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;

	/* Flow Director is enabled */
	if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
		settings->enable_fdir = true;

	/* Ethtype and MACVLAN filters enabled for PF */
	settings->enable_ethtype = true;
	settings->enable_macvlan = true;

	if (i40e_set_filter_control(&pf->hw, settings))
		return -ENOENT;

	return 0;
}
#define INFO_STRING_LEN 255
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	char *buf;
	int i;

	buf = kmalloc(INFO_STRING_LEN, GFP_KERNEL);
	if (!buf)
		return;

	i = snprintf(buf, INFO_STRING_LEN, "Features: PF-id[%d]", hw->pf_id);
#ifdef CONFIG_PCI_IOV
	i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
	i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
		      pf->hw.func_caps.num_vsis,
		      pf->vsi[pf->lan_vsi]->num_queue_pairs);
	if (pf->flags & I40E_FLAG_RSS_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " RSS");
	if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " FD_ATR");
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		i += snprintf(&buf[i], REMAIN(i), " FD_SB");
		i += snprintf(&buf[i], REMAIN(i), " NTUPLE");
	}
	if (pf->flags & I40E_FLAG_DCB_CAPABLE)
		i += snprintf(&buf[i], REMAIN(i), " DCB");
	i += snprintf(&buf[i], REMAIN(i), " VxLAN");
	i += snprintf(&buf[i], REMAIN(i), " Geneve");
	if (pf->flags & I40E_FLAG_PTP)
		i += snprintf(&buf[i], REMAIN(i), " PTP");
	if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
		i += snprintf(&buf[i], REMAIN(i), " VEB");
	else
		i += snprintf(&buf[i], REMAIN(i), " VEPA");

	dev_info(&pf->pdev->dev, "%s\n", buf);
	kfree(buf);
	WARN_ON(i > INFO_STRING_LEN);
}
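/* Note (sketch): REMAIN(i) keeps each snprintf() bounded by what is left of
 * the 255-byte buffer, so later appends degrade to truncation instead of an
 * overflow. Because snprintf() returns the length that *would* have been
 * written, i can still exceed INFO_STRING_LEN on truncation, which is
 * exactly what the final WARN_ON() flags:
 *
 *	i += snprintf(&buf[i], REMAIN(i), " EXAMPLE");  // hypothetical flag
 */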
/**
 * i40e_get_platform_mac_addr - get platform-specific MAC address
 * @pdev: PCI device information struct
 * @pf: board private structure
 *
 * Look up the MAC address for the device. First we'll try
 * eth_platform_get_mac_address, which will check Open Firmware, or arch
 * specific fallback. Otherwise, we'll default to the stored value in
 * firmware.
 **/
static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
{
	if (eth_platform_get_mac_address(&pdev->dev, pf->hw.mac.addr))
		i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
13854 * i40e_probe - Device initialization routine
13855 * @pdev: PCI device information struct
13856 * @ent: entry in i40e_pci_tbl
13858 * i40e_probe initializes a PF identified by a pci_dev structure.
13859 * The OS initialization, configuring of the PF private structure,
13860 * and a hardware reset occur.
13862 * Returns 0 on success, negative on failure
13864 static int i40e_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
13866 struct i40e_aq_get_phy_abilities_resp abilities
;
13867 struct i40e_pf
*pf
;
13868 struct i40e_hw
*hw
;
13869 static u16 pfs_found
;
13877 err
= pci_enable_device_mem(pdev
);
13881 /* set up for high or low dma */
13882 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(64));
13884 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
13886 dev_err(&pdev
->dev
,
13887 "DMA configuration failed: 0x%x\n", err
);
13892 /* set up pci connections */
13893 err
= pci_request_mem_regions(pdev
, i40e_driver_name
);
13895 dev_info(&pdev
->dev
,
13896 "pci_request_selected_regions failed %d\n", err
);
13900 pci_enable_pcie_error_reporting(pdev
);
13901 pci_set_master(pdev
);
13903 /* Now that we have a PCI connection, we need to do the
13904 * low level device setup. This is primarily setting up
13905 * the Admin Queue structures and then querying for the
13906 * device's current profile information.
13908 pf
= kzalloc(sizeof(*pf
), GFP_KERNEL
);
13915 set_bit(__I40E_DOWN
, pf
->state
);
13920 pf
->ioremap_len
= min_t(int, pci_resource_len(pdev
, 0),
13921 I40E_MAX_CSR_SPACE
);
13923 hw
->hw_addr
= ioremap(pci_resource_start(pdev
, 0), pf
->ioremap_len
);
13924 if (!hw
->hw_addr
) {
13926 dev_info(&pdev
->dev
, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
13927 (unsigned int)pci_resource_start(pdev
, 0),
13928 pf
->ioremap_len
, err
);
13931 hw
->vendor_id
= pdev
->vendor
;
13932 hw
->device_id
= pdev
->device
;
13933 pci_read_config_byte(pdev
, PCI_REVISION_ID
, &hw
->revision_id
);
13934 hw
->subsystem_vendor_id
= pdev
->subsystem_vendor
;
13935 hw
->subsystem_device_id
= pdev
->subsystem_device
;
13936 hw
->bus
.device
= PCI_SLOT(pdev
->devfn
);
13937 hw
->bus
.func
= PCI_FUNC(pdev
->devfn
);
13938 hw
->bus
.bus_id
= pdev
->bus
->number
;
13939 pf
->instance
= pfs_found
;
13941 /* Select something other than the 802.1ad ethertype for the
13942 * switch to use internally and drop on ingress.
13944 hw
->switch_tag
= 0xffff;
13945 hw
->first_tag
= ETH_P_8021AD
;
13946 hw
->second_tag
= ETH_P_8021Q
;
13948 INIT_LIST_HEAD(&pf
->l3_flex_pit_list
);
13949 INIT_LIST_HEAD(&pf
->l4_flex_pit_list
);
13951 /* set up the locks for the AQ, do this only once in probe
13952 * and destroy them only once in remove
13954 mutex_init(&hw
->aq
.asq_mutex
);
13955 mutex_init(&hw
->aq
.arq_mutex
);
13957 pf
->msg_enable
= netif_msg_init(debug
,
13962 pf
->hw
.debug_mask
= debug
;
13964 /* do a special CORER for clearing PXE mode once at init */
13965 if (hw
->revision_id
== 0 &&
13966 (rd32(hw
, I40E_GLLAN_RCTL_0
) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK
)) {
13967 wr32(hw
, I40E_GLGEN_RTRIG
, I40E_GLGEN_RTRIG_CORER_MASK
);
13972 i40e_clear_pxe_mode(hw
);
13975 /* Reset here to make sure all is clean and to define PF 'n' */
13977 err
= i40e_pf_reset(hw
);
13979 dev_info(&pdev
->dev
, "Initial pf_reset failed: %d\n", err
);
13984 hw
->aq
.num_arq_entries
= I40E_AQ_LEN
;
13985 hw
->aq
.num_asq_entries
= I40E_AQ_LEN
;
13986 hw
->aq
.arq_buf_size
= I40E_MAX_AQ_BUF_SIZE
;
13987 hw
->aq
.asq_buf_size
= I40E_MAX_AQ_BUF_SIZE
;
13988 pf
->adminq_work_limit
= I40E_AQ_WORK_LIMIT
;
13990 snprintf(pf
->int_name
, sizeof(pf
->int_name
) - 1,
13992 dev_driver_string(&pf
->pdev
->dev
), dev_name(&pdev
->dev
));
13994 err
= i40e_init_shared_code(hw
);
13996 dev_warn(&pdev
->dev
, "unidentified MAC or BLANK NVM: %d\n",
14001 /* set up a default setting for link flow control */
14002 pf
->hw
.fc
.requested_mode
= I40E_FC_NONE
;
14004 err
= i40e_init_adminq(hw
);
14006 if (err
== I40E_ERR_FIRMWARE_API_VERSION
)
14007 dev_info(&pdev
->dev
,
14008 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
14010 dev_info(&pdev
->dev
,
14011 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
14015 i40e_get_oem_version(hw
);
14017 /* provide nvm, fw, api versions */
14018 dev_info(&pdev
->dev
, "fw %d.%d.%05d api %d.%d nvm %s\n",
14019 hw
->aq
.fw_maj_ver
, hw
->aq
.fw_min_ver
, hw
->aq
.fw_build
,
14020 hw
->aq
.api_maj_ver
, hw
->aq
.api_min_ver
,
14021 i40e_nvm_version_str(hw
));
14023 if (hw
->aq
.api_maj_ver
== I40E_FW_API_VERSION_MAJOR
&&
14024 hw
->aq
.api_min_ver
> I40E_FW_MINOR_VERSION(hw
))
14025 dev_info(&pdev
->dev
,
14026 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
14027 else if (hw
->aq
.api_maj_ver
== 1 && hw
->aq
.api_min_ver
< 4)
14028 dev_info(&pdev
->dev
,
14029 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
14031 i40e_verify_eeprom(pf
);
14033 /* Rev 0 hardware was never productized */
14034 if (hw
->revision_id
< 1)
14035 dev_warn(&pdev
->dev
, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
14037 i40e_clear_pxe_mode(hw
);
14038 err
= i40e_get_capabilities(pf
, i40e_aqc_opc_list_func_capabilities
);
14040 goto err_adminq_setup
;
14042 err
= i40e_sw_init(pf
);
14044 dev_info(&pdev
->dev
, "sw_init failed: %d\n", err
);
14048 err
= i40e_init_lan_hmc(hw
, hw
->func_caps
.num_tx_qp
,
14049 hw
->func_caps
.num_rx_qp
, 0, 0);
14051 dev_info(&pdev
->dev
, "init_lan_hmc failed: %d\n", err
);
14052 goto err_init_lan_hmc
;
14055 err
= i40e_configure_lan_hmc(hw
, I40E_HMC_MODEL_DIRECT_ONLY
);
14057 dev_info(&pdev
->dev
, "configure_lan_hmc failed: %d\n", err
);
14059 goto err_configure_lan_hmc
;
14062 /* Disable LLDP for NICs that have firmware versions lower than v4.3.
14063 * Ignore error return codes because if it was already disabled via
14064 * hardware settings this will fail
14066 if (pf
->hw_features
& I40E_HW_STOP_FW_LLDP
) {
14067 dev_info(&pdev
->dev
, "Stopping firmware LLDP agent.\n");
14068 i40e_aq_stop_lldp(hw
, true, NULL
);
14071 /* allow a platform config to override the HW addr */
14072 i40e_get_platform_mac_addr(pdev
, pf
);
14074 if (!is_valid_ether_addr(hw
->mac
.addr
)) {
14075 dev_info(&pdev
->dev
, "invalid MAC address %pM\n", hw
->mac
.addr
);
14079 dev_info(&pdev
->dev
, "MAC address: %pM\n", hw
->mac
.addr
);
14080 ether_addr_copy(hw
->mac
.perm_addr
, hw
->mac
.addr
);
14081 i40e_get_port_mac_addr(hw
, hw
->mac
.port_addr
);
14082 if (is_valid_ether_addr(hw
->mac
.port_addr
))
14083 pf
->hw_features
|= I40E_HW_PORT_ID_VALID
;
14085 pci_set_drvdata(pdev
, pf
);
14086 pci_save_state(pdev
);
14088 /* Enable FW to write default DCB config on link-up */
14089 i40e_aq_set_dcb_parameters(hw
, true, NULL
);
14091 #ifdef CONFIG_I40E_DCB
14092 err
= i40e_init_pf_dcb(pf
);
14094 dev_info(&pdev
->dev
, "DCB init failed %d, disabled\n", err
);
14095 pf
->flags
&= ~(I40E_FLAG_DCB_CAPABLE
| I40E_FLAG_DCB_ENABLED
);
14096 /* Continue without DCB enabled */
14098 #endif /* CONFIG_I40E_DCB */
14100 /* set up periodic task facility */
14101 timer_setup(&pf
->service_timer
, i40e_service_timer
, 0);
14102 pf
->service_timer_period
= HZ
;
14104 INIT_WORK(&pf
->service_task
, i40e_service_task
);
14105 clear_bit(__I40E_SERVICE_SCHED
, pf
->state
);
14107 /* NVM bit on means WoL disabled for the port */
14108 i40e_read_nvm_word(hw
, I40E_SR_NVM_WAKE_ON_LAN
, &wol_nvm_bits
);
14109 if (BIT (hw
->port
) & wol_nvm_bits
|| hw
->partition_id
!= 1)
14110 pf
->wol_en
= false;
14113 device_set_wakeup_enable(&pf
->pdev
->dev
, pf
->wol_en
);
14115 /* set up the main switch operations */
14116 i40e_determine_queue_usage(pf
);
14117 err
= i40e_init_interrupt_scheme(pf
);
14119 goto err_switch_setup
;
14121 /* The number of VSIs reported by the FW is the minimum guaranteed
14122 * to us; HW supports far more and we share the remaining pool with
14123 * the other PFs. We allocate space for more than the guarantee with
14124 * the understanding that we might not get them all later.
14126 if (pf
->hw
.func_caps
.num_vsis
< I40E_MIN_VSI_ALLOC
)
14127 pf
->num_alloc_vsi
= I40E_MIN_VSI_ALLOC
;
14129 pf
->num_alloc_vsi
= pf
->hw
.func_caps
.num_vsis
;
14131 /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
14132 pf
->vsi
= kcalloc(pf
->num_alloc_vsi
, sizeof(struct i40e_vsi
*),
14136 goto err_switch_setup
;
14139 #ifdef CONFIG_PCI_IOV
14140 /* prep for VF support */
14141 if ((pf
->flags
& I40E_FLAG_SRIOV_ENABLED
) &&
14142 (pf
->flags
& I40E_FLAG_MSIX_ENABLED
) &&
14143 !test_bit(__I40E_BAD_EEPROM
, pf
->state
)) {
14144 if (pci_num_vf(pdev
))
14145 pf
->flags
|= I40E_FLAG_VEB_MODE_ENABLED
;
14148 err
= i40e_setup_pf_switch(pf
, false);
14150 dev_info(&pdev
->dev
, "setup_pf_switch failed: %d\n", err
);
14153 INIT_LIST_HEAD(&pf
->vsi
[pf
->lan_vsi
]->ch_list
);
14155 /* Make sure flow control is set according to current settings */
14156 err
= i40e_set_fc(hw
, &set_fc_aq_fail
, true);
14157 if (set_fc_aq_fail
& I40E_SET_FC_AQ_FAIL_GET
)
14158 dev_dbg(&pf
->pdev
->dev
,
14159 "Set fc with err %s aq_err %s on get_phy_cap\n",
14160 i40e_stat_str(hw
, err
),
14161 i40e_aq_str(hw
, hw
->aq
.asq_last_status
));
14162 if (set_fc_aq_fail
& I40E_SET_FC_AQ_FAIL_SET
)
14163 dev_dbg(&pf
->pdev
->dev
,
14164 "Set fc with err %s aq_err %s on set_phy_config\n",
14165 i40e_stat_str(hw
, err
),
14166 i40e_aq_str(hw
, hw
->aq
.asq_last_status
));
14167 if (set_fc_aq_fail
& I40E_SET_FC_AQ_FAIL_UPDATE
)
14168 dev_dbg(&pf
->pdev
->dev
,
14169 "Set fc with err %s aq_err %s on get_link_info\n",
14170 i40e_stat_str(hw
, err
),
14171 i40e_aq_str(hw
, hw
->aq
.asq_last_status
));
14173 /* if FDIR VSI was set up, start it now */
14174 for (i
= 0; i
< pf
->num_alloc_vsi
; i
++) {
14175 if (pf
->vsi
[i
] && pf
->vsi
[i
]->type
== I40E_VSI_FDIR
) {
14176 i40e_vsi_open(pf
->vsi
[i
]);
14181 /* The driver only wants link up/down and module qualification
14182 * reports from firmware. Note the negative logic.
14184 err
= i40e_aq_set_phy_int_mask(&pf
->hw
,
14185 ~(I40E_AQ_EVENT_LINK_UPDOWN
|
14186 I40E_AQ_EVENT_MEDIA_NA
|
14187 I40E_AQ_EVENT_MODULE_QUAL_FAIL
), NULL
);
14189 dev_info(&pf
->pdev
->dev
, "set phy mask fail, err %s aq_err %s\n",
14190 i40e_stat_str(&pf
->hw
, err
),
14191 i40e_aq_str(&pf
->hw
, pf
->hw
.aq
.asq_last_status
));
14193 /* Reconfigure hardware for allowing smaller MSS in the case
14194 * of TSO, so that we avoid the MDD being fired and causing
14195 * a reset in the case of small MSS+TSO.
14197 val
= rd32(hw
, I40E_REG_MSS
);
14198 if ((val
& I40E_REG_MSS_MIN_MASK
) > I40E_64BYTE_MSS
) {
14199 val
&= ~I40E_REG_MSS_MIN_MASK
;
14200 val
|= I40E_64BYTE_MSS
;
14201 wr32(hw
, I40E_REG_MSS
, val
);
14204 if (pf
->hw_features
& I40E_HW_RESTART_AUTONEG
) {
14206 err
= i40e_aq_set_link_restart_an(&pf
->hw
, true, NULL
);
14208 dev_info(&pf
->pdev
->dev
, "link restart failed, err %s aq_err %s\n",
14209 i40e_stat_str(&pf
->hw
, err
),
14210 i40e_aq_str(&pf
->hw
,
14211 pf
->hw
.aq
.asq_last_status
));
14213 /* The main driver is (mostly) up and happy. We need to set this state
14214 * before setting up the misc vector or we get a race and the vector
14215 * ends up disabled forever.
14217 clear_bit(__I40E_DOWN
, pf
->state
);
14219 /* In case of MSIX we are going to setup the misc vector right here
14220 * to handle admin queue events etc. In case of legacy and MSI
14221 * the misc functionality and queue processing is combined in
14222 * the same vector and that gets setup at open.
14224 if (pf
->flags
& I40E_FLAG_MSIX_ENABLED
) {
14225 err
= i40e_setup_misc_vector(pf
);
14227 dev_info(&pdev
->dev
,
14228 "setup of misc vector failed: %d\n", err
);
14233 #ifdef CONFIG_PCI_IOV
14234 /* prep for VF support */
14235 if ((pf
->flags
& I40E_FLAG_SRIOV_ENABLED
) &&
14236 (pf
->flags
& I40E_FLAG_MSIX_ENABLED
) &&
14237 !test_bit(__I40E_BAD_EEPROM
, pf
->state
)) {
14238 /* disable link interrupts for VFs */
14239 val
= rd32(hw
, I40E_PFGEN_PORTMDIO_NUM
);
14240 val
&= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK
;
14241 wr32(hw
, I40E_PFGEN_PORTMDIO_NUM
, val
);
14244 if (pci_num_vf(pdev
)) {
14245 dev_info(&pdev
->dev
,
14246 "Active VFs found, allocating resources.\n");
14247 err
= i40e_alloc_vfs(pf
, pci_num_vf(pdev
));
14249 dev_info(&pdev
->dev
,
14250 "Error %d allocating resources for existing VFs\n",
14254 #endif /* CONFIG_PCI_IOV */
14256 if (pf
->flags
& I40E_FLAG_IWARP_ENABLED
) {
14257 pf
->iwarp_base_vector
= i40e_get_lump(pf
, pf
->irq_pile
,
14258 pf
->num_iwarp_msix
,
14259 I40E_IWARP_IRQ_PILE_ID
);
14260 if (pf
->iwarp_base_vector
< 0) {
14261 dev_info(&pdev
->dev
,
14262 "failed to get tracking for %d vectors for IWARP err=%d\n",
14263 pf
->num_iwarp_msix
, pf
->iwarp_base_vector
);
14264 pf
->flags
&= ~I40E_FLAG_IWARP_ENABLED
;
14268 i40e_dbg_pf_init(pf
);
14270 /* tell the firmware that we're starting */
14271 i40e_send_version(pf
);
14273 /* since everything's happy, start the service_task timer */
14274 mod_timer(&pf
->service_timer
,
14275 round_jiffies(jiffies
+ pf
->service_timer_period
));
14277 /* add this PF to client device list and launch a client service task */
14278 if (pf
->flags
& I40E_FLAG_IWARP_ENABLED
) {
14279 err
= i40e_lan_add_device(pf
);
14281 dev_info(&pdev
->dev
, "Failed to add PF to client API service list: %d\n",
14285 #define PCI_SPEED_SIZE 8
14286 #define PCI_WIDTH_SIZE 8
14287 /* Devices on the IOSF bus do not have this information
14288 * and will report PCI Gen 1 x 1 by default so don't bother
14291 if (!(pf
->hw_features
& I40E_HW_NO_PCI_LINK_CHECK
)) {
14292 char speed
[PCI_SPEED_SIZE
] = "Unknown";
14293 char width
[PCI_WIDTH_SIZE
] = "Unknown";
14295 /* Get the negotiated link width and speed from PCI config
14298 pcie_capability_read_word(pf
->pdev
, PCI_EXP_LNKSTA
,
14301 i40e_set_pci_config_data(hw
, link_status
);
14303 switch (hw
->bus
.speed
) {
14304 case i40e_bus_speed_8000
:
14305 strncpy(speed
, "8.0", PCI_SPEED_SIZE
); break;
14306 case i40e_bus_speed_5000
:
14307 strncpy(speed
, "5.0", PCI_SPEED_SIZE
); break;
14308 case i40e_bus_speed_2500
:
14309 strncpy(speed
, "2.5", PCI_SPEED_SIZE
); break;
14313 switch (hw
->bus
.width
) {
14314 case i40e_bus_width_pcie_x8
:
14315 strncpy(width
, "8", PCI_WIDTH_SIZE
); break;
14316 case i40e_bus_width_pcie_x4
:
14317 strncpy(width
, "4", PCI_WIDTH_SIZE
); break;
14318 case i40e_bus_width_pcie_x2
:
14319 strncpy(width
, "2", PCI_WIDTH_SIZE
); break;
14320 case i40e_bus_width_pcie_x1
:
14321 strncpy(width
, "1", PCI_WIDTH_SIZE
); break;
14326 dev_info(&pdev
->dev
, "PCI-Express: Speed %sGT/s Width x%s\n",
14329 if (hw
->bus
.width
< i40e_bus_width_pcie_x8
||
14330 hw
->bus
.speed
< i40e_bus_speed_8000
) {
14331 dev_warn(&pdev
->dev
, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
14332 dev_warn(&pdev
->dev
, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
14336 /* get the requested speeds from the fw */
14337 err
= i40e_aq_get_phy_capabilities(hw
, false, false, &abilities
, NULL
);
14339 dev_dbg(&pf
->pdev
->dev
, "get requested speeds ret = %s last_status = %s\n",
14340 i40e_stat_str(&pf
->hw
, err
),
14341 i40e_aq_str(&pf
->hw
, pf
->hw
.aq
.asq_last_status
));
14342 pf
->hw
.phy
.link_info
.requested_speeds
= abilities
.link_speed
;
14344 /* get the supported phy types from the fw */
14345 err
= i40e_aq_get_phy_capabilities(hw
, false, true, &abilities
, NULL
);
14347 dev_dbg(&pf
->pdev
->dev
, "get supported phy types ret = %s last_status = %s\n",
14348 i40e_stat_str(&pf
->hw
, err
),
14349 i40e_aq_str(&pf
->hw
, pf
->hw
.aq
.asq_last_status
));
	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
	    (pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
		pf->hw_features |= I40E_HW_PHY_CONTROLS_LEDS;
	if (pf->hw.device_id == I40E_DEV_ID_SFP_I_X722)
		pf->hw_features |= I40E_HW_HAVE_CRT_RETIMER;
	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;
	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	/* Disable RSS in hw */
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);
	if (pf->service_timer.function)
		del_timer_sync(&pf->service_timer);
	if (pf->service_task.func)
		cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);
	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}
	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	i40e_cloud_filter_exit(pf);

	/* remove attached clients */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED) {
		ret_code = i40e_lan_del_device(pf);
		if (ret_code)
			dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
				 ret_code);
	}

	/* shutdown and destroy the HMC */
	if (hw->hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	i40e_shutdown_adminq(hw);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(hw->hw_addr);
	kfree(pf);
	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
/**
 * i40e_pci_error_detected - warning that something funky happened in PCI land
 * @pdev: PCI device information struct
 * @error: the type of PCI error
 *
 * Called to warn that something happened and the error handling steps
 * are in progress.  Allows the driver to quiesce things, be ready for
 * remediation.
 **/
static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
						enum pci_channel_state error)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_info(&pdev->dev, "%s: error %d\n", __func__, error);

	if (!pf) {
		dev_info(&pdev->dev,
			 "Cannot recover - error happened during device probe\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* shutdown all operations */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		i40e_prep_for_reset(pf, false);

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
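/* Editorial note: this callback is the first stage of the PCI error-recovery
 * (AER) sequence. Returning PCI_ERS_RESULT_NEED_RESET asks the PCI core to
 * perform a slot reset and then invoke .slot_reset below; .resume runs last,
 * once recovery has succeeded. All of these hooks are wired up in the
 * i40e_err_handler table near the end of this file.
 */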
/**
 * i40e_pci_error_slot_reset - a PCI slot reset just happened
 * @pdev: PCI device information struct
 *
 * Called to find if the driver can work with the device now that
 * the pci slot has been reset.  If a basic connection seems good
 * (registers are readable and have sane content) then return a
 * happy little PCI_ERS_RESULT_xxx.
 **/
static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	pci_ers_result_t result;
	u32 reg;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (pci_enable_device_mem(pdev)) {
		dev_info(&pdev->dev,
			 "Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);
		pci_wake_from_d3(pdev, false);

		reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		if (reg == 0)
			result = PCI_ERS_RESULT_RECOVERED;
		else
			result = PCI_ERS_RESULT_DISCONNECT;
	}

	return result;
}
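/* Editorial note: the I40E_GLGEN_RTRIG read above is the "registers are
 * readable and have sane content" check from the comment. RTRIG holds the
 * global-reset trigger bits, so a healthy, quiesced device should read back
 * zero, whereas a dead PCIe link typically returns all-ones.
 */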
/**
 * i40e_pci_error_reset_prepare - prepare device driver for pci reset
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_prepare(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_prep_for_reset(pf, false);
}

/**
 * i40e_pci_error_reset_done - pci reset done, device driver reset can begin
 * @pdev: PCI device information struct
 **/
static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	i40e_reset_and_rebuild(pf, false, false);
}
/**
 * i40e_pci_error_resume - restart operations after PCI error recovery
 * @pdev: PCI device information struct
 *
 * Called to allow the driver to bring things back up after PCI error
 * and/or reset recovery has finished.
 **/
static void i40e_pci_error_resume(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);
	if (test_bit(__I40E_SUSPENDED, pf->state))
		return;

	i40e_handle_reset_warning(pf, false);
}
/**
 * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
 * using the mac_address_write admin q function
 * @pf: pointer to i40e_pf struct
 **/
static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u8 mac_addr[6];
	u16 flags = 0;

	/* Get current MAC address in case it's an LAA */
	if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
		ether_addr_copy(mac_addr,
				pf->vsi[pf->lan_vsi]->netdev->dev_addr);
	} else {
		dev_err(&pf->pdev->dev,
			"Failed to retrieve MAC address; using default\n");
		ether_addr_copy(mac_addr, hw->mac.addr);
	}

	/* The FW expects the mac address write cmd to first be called with
	 * one of these flags before calling it again with the multicast
	 * enable flags.
	 */
	flags = I40E_AQC_WRITE_TYPE_LAA_WOL;

	if (hw->func_caps.flex10_enable && hw->partition_id != 1)
		flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;

	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret) {
		dev_err(&pf->pdev->dev,
			"Failed to update MAC address registers; cannot enable Multicast Magic packet wake up\n");
		return;
	}

	flags = I40E_AQC_MC_MAG_EN
			| I40E_AQC_WOL_PRESERVE_ON_PFR
			| I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
	ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
	if (ret)
		dev_err(&pf->pdev->dev,
			"Failed to enable Multicast Magic Packet wake up\n");
}
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	set_bit(__I40E_SUSPENDED, pf->state);
	set_bit(__I40E_DOWN, pf->state);

	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_cloud_filter_exit(pf);
	i40e_fdir_teardown(pf);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	i40e_prep_for_reset(pf, false);

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
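	/* Editorial note: the two writes above arm Wake-on-LAN in hardware.
	 * Per the XL710 register descriptions, PFPM_APM's APME bit enables
	 * APM wake-up for this PF and PFPM_WUFC's MAG bit enables the
	 * magic-packet wake filter; both are cleared to 0 when WoL is
	 * disabled so the sleeping device cannot wake the system.
	 */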
	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
/**
 * i40e_suspend - PM callback for moving to D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* If we're already suspended, then there is nothing to do */
	if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	set_bit(__I40E_DOWN, pf->state);

	/* Ensure service task will not be running */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	/* Client close must be called explicitly here because the timer
	 * has been stopped.
	 */
	i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);

	if (pf->wol_en && (pf->hw_features & I40E_HW_WOL_MC_MAGIC_PKT_WAKE))
		i40e_enable_mc_magic_wake(pf);

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();

	i40e_prep_for_reset(pf, true);

	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Clear the interrupt scheme and release our IRQs so that the system
	 * can safely hibernate even when there are a large number of CPUs.
	 * Otherwise hibernation might fail when mapping all the vectors back
	 * to CPU0.
	 */
	i40e_clear_interrupt_scheme(pf);

	rtnl_unlock();

	return 0;
}
/**
 * i40e_resume - PM callback for waking up from D3
 * @dev: generic device information structure
 **/
static int __maybe_unused i40e_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	int err;

	/* If we're not suspended, then there is nothing to do */
	if (!test_bit(__I40E_SUSPENDED, pf->state))
		return 0;

	/* We need to hold the RTNL lock prior to restoring interrupt schemes,
	 * since we're going to be restoring queues
	 */
	rtnl_lock();

	/* We cleared the interrupt scheme when we suspended, so we need to
	 * restore it now to resume device functionality.
	 */
	err = i40e_restore_interrupt_scheme(pf);
	if (err)
		dev_err(&pdev->dev, "Cannot restore interrupt scheme: %d\n",
			err);

	clear_bit(__I40E_DOWN, pf->state);
	i40e_reset_and_rebuild(pf, false, true);

	rtnl_unlock();

	/* Clear suspended state last after everything is recovered */
	clear_bit(__I40E_SUSPENDED, pf->state);

	/* Restart the service task */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

	return 0;
}
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.reset_prepare = i40e_pci_error_reset_prepare,
	.reset_done = i40e_pci_error_reset_done,
	.resume = i40e_pci_error_resume,
};

static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
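/* Editorial note: SIMPLE_DEV_PM_OPS() from <linux/pm.h> builds a struct
 * dev_pm_ops that reuses the same pair of callbacks for every system-sleep
 * transition; it expands to roughly:
 *
 *	static const struct dev_pm_ops i40e_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(i40e_suspend, i40e_resume)
 *	};
 *
 * so i40e_suspend/i40e_resume also serve as the freeze/thaw and
 * poweroff/restore handlers used for hibernation.
 */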
static struct pci_driver i40e_driver = {
	.name = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe = i40e_probe,
	.remove = i40e_remove,
	.driver = {
		.pm = &i40e_pm_ops,
	},
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
/**
 * i40e_init_module - Driver registration routine
 *
 * i40e_init_module is the first routine called when the driver is
 * loaded.  All it does is register with the PCI subsystem.
 **/
static int __init i40e_init_module(void)
{
	pr_info("%s: %s - version %s\n", i40e_driver_name,
		i40e_driver_string, i40e_driver_version_str);
	pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);

	/* There is no need to throttle the number of active tasks because
	 * each device limits its own task using a state bit for scheduling
	 * the service task, and the device tasks do not interfere with each
	 * other, so we don't set a max task limit.  We must set WQ_MEM_RECLAIM
	 * since we need to be able to guarantee forward progress even under
	 * memory pressure.
	 */
	i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
	if (!i40e_wq) {
		pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
		return -ENOMEM;
	}

	i40e_dbg_init();
	return pci_register_driver(&i40e_driver);
}
module_init(i40e_init_module);
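/* Editorial note: in the alloc_workqueue() call above, "%s" with
 * i40e_driver_name is a printf-style workqueue name, and a max_active of 0
 * selects the default per-CPU concurrency limit. WQ_MEM_RECLAIM guarantees
 * the workqueue a rescuer thread, which is what lets the service task make
 * forward progress under memory pressure, as the preceding comment explains.
 */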
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	pci_unregister_driver(&i40e_driver);
	destroy_workqueue(i40e_wq);
	i40e_dbg_exit();
}
module_exit(i40e_exit_module);