1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2019 Intel Corporation. */
4 #include <linux/module.h>
5 #include <linux/interrupt.h>
10 static const struct fm10k_info
*fm10k_info_tbl
[] = {
11 [fm10k_device_pf
] = &fm10k_pf_info
,
12 [fm10k_device_vf
] = &fm10k_vf_info
,
16 * fm10k_pci_tbl - PCI Device ID Table
18 * Wildcard entries (PCI_ANY_ID) should come last
19 * Last entry must be all 0s
21 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
22 * Class, Class Mask, private data (not used) }
24 static const struct pci_device_id fm10k_pci_tbl
[] = {
25 { PCI_VDEVICE(INTEL
, FM10K_DEV_ID_PF
), fm10k_device_pf
},
26 { PCI_VDEVICE(INTEL
, FM10K_DEV_ID_SDI_FM10420_QDA2
), fm10k_device_pf
},
27 { PCI_VDEVICE(INTEL
, FM10K_DEV_ID_SDI_FM10420_DA2
), fm10k_device_pf
},
28 { PCI_VDEVICE(INTEL
, FM10K_DEV_ID_VF
), fm10k_device_vf
},
29 /* required last entry */
32 MODULE_DEVICE_TABLE(pci
, fm10k_pci_tbl
);
34 u16
fm10k_read_pci_cfg_word(struct fm10k_hw
*hw
, u32 reg
)
36 struct fm10k_intfc
*interface
= hw
->back
;
39 if (FM10K_REMOVED(hw
->hw_addr
))
42 pci_read_config_word(interface
->pdev
, reg
, &value
);
44 fm10k_write_flush(hw
);
49 u32
fm10k_read_reg(struct fm10k_hw
*hw
, int reg
)
51 u32 __iomem
*hw_addr
= READ_ONCE(hw
->hw_addr
);
54 if (FM10K_REMOVED(hw_addr
))
57 value
= readl(&hw_addr
[reg
]);
58 if (!(~value
) && (!reg
|| !(~readl(hw_addr
)))) {
59 struct fm10k_intfc
*interface
= hw
->back
;
60 struct net_device
*netdev
= interface
->netdev
;
63 netif_device_detach(netdev
);
64 netdev_err(netdev
, "PCIe link lost, device now detached\n");
70 static int fm10k_hw_ready(struct fm10k_intfc
*interface
)
72 struct fm10k_hw
*hw
= &interface
->hw
;
74 fm10k_write_flush(hw
);
76 return FM10K_REMOVED(hw
->hw_addr
) ? -ENODEV
: 0;
80 * fm10k_macvlan_schedule - Schedule MAC/VLAN queue task
81 * @interface: fm10k private interface structure
83 * Schedule the MAC/VLAN queue monitor task. If the MAC/VLAN task cannot be
84 * started immediately, request that it be restarted when possible.
86 void fm10k_macvlan_schedule(struct fm10k_intfc
*interface
)
88 /* Avoid processing the MAC/VLAN queue when the service task is
89 * disabled, or when we're resetting the device.
91 if (!test_bit(__FM10K_MACVLAN_DISABLE
, interface
->state
) &&
92 !test_and_set_bit(__FM10K_MACVLAN_SCHED
, interface
->state
)) {
93 clear_bit(__FM10K_MACVLAN_REQUEST
, interface
->state
);
94 /* We delay the actual start of execution in order to allow
95 * multiple MAC/VLAN updates to accumulate before handling
96 * them, and to allow some time to let the mailbox drain
99 queue_delayed_work(fm10k_workqueue
,
100 &interface
->macvlan_task
, 10);
102 set_bit(__FM10K_MACVLAN_REQUEST
, interface
->state
);
107 * fm10k_stop_macvlan_task - Stop the MAC/VLAN queue monitor
108 * @interface: fm10k private interface structure
110 * Wait until the MAC/VLAN queue task has stopped, and cancel any future
113 static void fm10k_stop_macvlan_task(struct fm10k_intfc
*interface
)
115 /* Disable the MAC/VLAN work item */
116 set_bit(__FM10K_MACVLAN_DISABLE
, interface
->state
);
118 /* Make sure we waited until any current invocations have stopped */
119 cancel_delayed_work_sync(&interface
->macvlan_task
);
121 /* We set the __FM10K_MACVLAN_SCHED bit when we schedule the task.
122 * However, it may not be unset of the MAC/VLAN task never actually
123 * got a chance to run. Since we've canceled the task here, and it
124 * cannot be rescheuled right now, we need to ensure the scheduled bit
127 clear_bit(__FM10K_MACVLAN_SCHED
, interface
->state
);
131 * fm10k_resume_macvlan_task - Restart the MAC/VLAN queue monitor
132 * @interface: fm10k private interface structure
134 * Clear the __FM10K_MACVLAN_DISABLE bit and, if a request occurred, schedule
135 * the MAC/VLAN work monitor.
137 static void fm10k_resume_macvlan_task(struct fm10k_intfc
*interface
)
139 /* Re-enable the MAC/VLAN work item */
140 clear_bit(__FM10K_MACVLAN_DISABLE
, interface
->state
);
142 /* We might have received a MAC/VLAN request while disabled. If so,
143 * kick off the queue now.
145 if (test_bit(__FM10K_MACVLAN_REQUEST
, interface
->state
))
146 fm10k_macvlan_schedule(interface
);
149 void fm10k_service_event_schedule(struct fm10k_intfc
*interface
)
151 if (!test_bit(__FM10K_SERVICE_DISABLE
, interface
->state
) &&
152 !test_and_set_bit(__FM10K_SERVICE_SCHED
, interface
->state
)) {
153 clear_bit(__FM10K_SERVICE_REQUEST
, interface
->state
);
154 queue_work(fm10k_workqueue
, &interface
->service_task
);
156 set_bit(__FM10K_SERVICE_REQUEST
, interface
->state
);
160 static void fm10k_service_event_complete(struct fm10k_intfc
*interface
)
162 WARN_ON(!test_bit(__FM10K_SERVICE_SCHED
, interface
->state
));
164 /* flush memory to make sure state is correct before next watchog */
165 smp_mb__before_atomic();
166 clear_bit(__FM10K_SERVICE_SCHED
, interface
->state
);
168 /* If a service event was requested since we started, immediately
169 * re-schedule now. This ensures we don't drop a request until the
172 if (test_bit(__FM10K_SERVICE_REQUEST
, interface
->state
))
173 fm10k_service_event_schedule(interface
);
176 static void fm10k_stop_service_event(struct fm10k_intfc
*interface
)
178 set_bit(__FM10K_SERVICE_DISABLE
, interface
->state
);
179 cancel_work_sync(&interface
->service_task
);
181 /* It's possible that cancel_work_sync stopped the service task from
182 * running before it could actually start. In this case the
183 * __FM10K_SERVICE_SCHED bit will never be cleared. Since we know that
184 * the service task cannot be running at this point, we need to clear
185 * the scheduled bit, as otherwise the service task may never be
188 clear_bit(__FM10K_SERVICE_SCHED
, interface
->state
);
191 static void fm10k_start_service_event(struct fm10k_intfc
*interface
)
193 clear_bit(__FM10K_SERVICE_DISABLE
, interface
->state
);
194 fm10k_service_event_schedule(interface
);
198 * fm10k_service_timer - Timer Call-back
199 * @t: pointer to timer data
201 static void fm10k_service_timer(struct timer_list
*t
)
203 struct fm10k_intfc
*interface
= from_timer(interface
, t
,
206 /* Reset the timer */
207 mod_timer(&interface
->service_timer
, (HZ
* 2) + jiffies
);
209 fm10k_service_event_schedule(interface
);
213 * fm10k_prepare_for_reset - Prepare the driver and device for a pending reset
214 * @interface: fm10k private data structure
216 * This function prepares for a device reset by shutting as much down as we
217 * can. It does nothing and returns false if __FM10K_RESETTING was already set
218 * prior to calling this function. It returns true if it actually did work.
220 static bool fm10k_prepare_for_reset(struct fm10k_intfc
*interface
)
222 struct net_device
*netdev
= interface
->netdev
;
224 WARN_ON(in_interrupt());
226 /* put off any impending NetWatchDogTimeout */
227 netif_trans_update(netdev
);
229 /* Nothing to do if a reset is already in progress */
230 if (test_and_set_bit(__FM10K_RESETTING
, interface
->state
))
233 /* As the MAC/VLAN task will be accessing registers it must not be
234 * running while we reset. Although the task will not be scheduled
235 * once we start resetting it may already be running
237 fm10k_stop_macvlan_task(interface
);
241 fm10k_iov_suspend(interface
->pdev
);
243 if (netif_running(netdev
))
246 fm10k_mbx_free_irq(interface
);
248 /* free interrupts */
249 fm10k_clear_queueing_scheme(interface
);
251 /* delay any future reset requests */
252 interface
->last_reset
= jiffies
+ (10 * HZ
);
259 static int fm10k_handle_reset(struct fm10k_intfc
*interface
)
261 struct net_device
*netdev
= interface
->netdev
;
262 struct fm10k_hw
*hw
= &interface
->hw
;
265 WARN_ON(!test_bit(__FM10K_RESETTING
, interface
->state
));
269 pci_set_master(interface
->pdev
);
271 /* reset and initialize the hardware so it is in a known state */
272 err
= hw
->mac
.ops
.reset_hw(hw
);
274 dev_err(&interface
->pdev
->dev
, "reset_hw failed: %d\n", err
);
278 err
= hw
->mac
.ops
.init_hw(hw
);
280 dev_err(&interface
->pdev
->dev
, "init_hw failed: %d\n", err
);
284 err
= fm10k_init_queueing_scheme(interface
);
286 dev_err(&interface
->pdev
->dev
,
287 "init_queueing_scheme failed: %d\n", err
);
291 /* re-associate interrupts */
292 err
= fm10k_mbx_request_irq(interface
);
296 err
= fm10k_hw_ready(interface
);
300 /* update hardware address for VFs if perm_addr has changed */
301 if (hw
->mac
.type
== fm10k_mac_vf
) {
302 if (is_valid_ether_addr(hw
->mac
.perm_addr
)) {
303 ether_addr_copy(hw
->mac
.addr
, hw
->mac
.perm_addr
);
304 ether_addr_copy(netdev
->perm_addr
, hw
->mac
.perm_addr
);
305 ether_addr_copy(netdev
->dev_addr
, hw
->mac
.perm_addr
);
306 netdev
->addr_assign_type
&= ~NET_ADDR_RANDOM
;
309 if (hw
->mac
.vlan_override
)
310 netdev
->features
&= ~NETIF_F_HW_VLAN_CTAG_RX
;
312 netdev
->features
|= NETIF_F_HW_VLAN_CTAG_RX
;
315 err
= netif_running(netdev
) ? fm10k_open(netdev
) : 0;
319 fm10k_iov_resume(interface
->pdev
);
323 fm10k_resume_macvlan_task(interface
);
325 clear_bit(__FM10K_RESETTING
, interface
->state
);
329 fm10k_mbx_free_irq(interface
);
331 fm10k_clear_queueing_scheme(interface
);
333 netif_device_detach(netdev
);
337 clear_bit(__FM10K_RESETTING
, interface
->state
);
342 static void fm10k_detach_subtask(struct fm10k_intfc
*interface
)
344 struct net_device
*netdev
= interface
->netdev
;
345 u32 __iomem
*hw_addr
;
348 /* do nothing if netdev is still present or hw_addr is set */
349 if (netif_device_present(netdev
) || interface
->hw
.hw_addr
)
352 /* We've lost the PCIe register space, and can no longer access the
353 * device. Shut everything except the detach subtask down and prepare
354 * to reset the device in case we recover. If we actually prepare for
355 * reset, indicate that we're detached.
357 if (fm10k_prepare_for_reset(interface
))
358 set_bit(__FM10K_RESET_DETACHED
, interface
->state
);
360 /* check the real address space to see if we've recovered */
361 hw_addr
= READ_ONCE(interface
->uc_addr
);
362 value
= readl(hw_addr
);
366 /* Make sure the reset was initiated because we detached,
367 * otherwise we might race with a different reset flow.
369 if (!test_and_clear_bit(__FM10K_RESET_DETACHED
,
373 /* Restore the hardware address */
374 interface
->hw
.hw_addr
= interface
->uc_addr
;
376 /* PCIe link has been restored, and the device is active
377 * again. Restore everything and reset the device.
379 err
= fm10k_handle_reset(interface
);
381 netdev_err(netdev
, "Unable to reset device: %d\n", err
);
382 interface
->hw
.hw_addr
= NULL
;
386 /* Re-attach the netdev */
387 netif_device_attach(netdev
);
388 netdev_warn(netdev
, "PCIe link restored, device now attached\n");
393 static void fm10k_reset_subtask(struct fm10k_intfc
*interface
)
397 if (!test_and_clear_bit(FM10K_FLAG_RESET_REQUESTED
,
401 /* If another thread has already prepared to reset the device, we
402 * should not attempt to handle a reset here, since we'd race with
403 * that thread. This may happen if we suspend the device or if the
404 * PCIe link is lost. In this case, we'll just ignore the RESET
405 * request, as it will (eventually) be taken care of when the thread
406 * which actually started the reset is finished.
408 if (!fm10k_prepare_for_reset(interface
))
411 netdev_err(interface
->netdev
, "Reset interface\n");
413 err
= fm10k_handle_reset(interface
);
415 dev_err(&interface
->pdev
->dev
,
416 "fm10k_handle_reset failed: %d\n", err
);
420 * fm10k_configure_swpri_map - Configure Receive SWPRI to PC mapping
421 * @interface: board private structure
423 * Configure the SWPRI to PC mapping for the port.
425 static void fm10k_configure_swpri_map(struct fm10k_intfc
*interface
)
427 struct net_device
*netdev
= interface
->netdev
;
428 struct fm10k_hw
*hw
= &interface
->hw
;
431 /* clear flag indicating update is needed */
432 clear_bit(FM10K_FLAG_SWPRI_CONFIG
, interface
->flags
);
434 /* these registers are only available on the PF */
435 if (hw
->mac
.type
!= fm10k_mac_pf
)
438 /* configure SWPRI to PC map */
439 for (i
= 0; i
< FM10K_SWPRI_MAX
; i
++)
440 fm10k_write_reg(hw
, FM10K_SWPRI_MAP(i
),
441 netdev_get_prio_tc_map(netdev
, i
));
445 * fm10k_watchdog_update_host_state - Update the link status based on host.
446 * @interface: board private structure
448 static void fm10k_watchdog_update_host_state(struct fm10k_intfc
*interface
)
450 struct fm10k_hw
*hw
= &interface
->hw
;
453 if (test_bit(__FM10K_LINK_DOWN
, interface
->state
)) {
454 interface
->host_ready
= false;
455 if (time_is_after_jiffies(interface
->link_down_event
))
457 clear_bit(__FM10K_LINK_DOWN
, interface
->state
);
460 if (test_bit(FM10K_FLAG_SWPRI_CONFIG
, interface
->flags
)) {
461 if (rtnl_trylock()) {
462 fm10k_configure_swpri_map(interface
);
467 /* lock the mailbox for transmit and receive */
468 fm10k_mbx_lock(interface
);
470 err
= hw
->mac
.ops
.get_host_state(hw
, &interface
->host_ready
);
471 if (err
&& time_is_before_jiffies(interface
->last_reset
))
472 set_bit(FM10K_FLAG_RESET_REQUESTED
, interface
->flags
);
475 fm10k_mbx_unlock(interface
);
479 * fm10k_mbx_subtask - Process upstream and downstream mailboxes
480 * @interface: board private structure
482 * This function will process both the upstream and downstream mailboxes.
484 static void fm10k_mbx_subtask(struct fm10k_intfc
*interface
)
486 /* If we're resetting, bail out */
487 if (test_bit(__FM10K_RESETTING
, interface
->state
))
490 /* process upstream mailbox and update device state */
491 fm10k_watchdog_update_host_state(interface
);
493 /* process downstream mailboxes */
494 fm10k_iov_mbx(interface
);
498 * fm10k_watchdog_host_is_ready - Update netdev status based on host ready
499 * @interface: board private structure
501 static void fm10k_watchdog_host_is_ready(struct fm10k_intfc
*interface
)
503 struct net_device
*netdev
= interface
->netdev
;
505 /* only continue if link state is currently down */
506 if (netif_carrier_ok(netdev
))
509 netif_info(interface
, drv
, netdev
, "NIC Link is up\n");
511 netif_carrier_on(netdev
);
512 netif_tx_wake_all_queues(netdev
);
516 * fm10k_watchdog_host_not_ready - Update netdev status based on host not ready
517 * @interface: board private structure
519 static void fm10k_watchdog_host_not_ready(struct fm10k_intfc
*interface
)
521 struct net_device
*netdev
= interface
->netdev
;
523 /* only continue if link state is currently up */
524 if (!netif_carrier_ok(netdev
))
527 netif_info(interface
, drv
, netdev
, "NIC Link is down\n");
529 netif_carrier_off(netdev
);
530 netif_tx_stop_all_queues(netdev
);
534 * fm10k_update_stats - Update the board statistics counters.
535 * @interface: board private structure
537 void fm10k_update_stats(struct fm10k_intfc
*interface
)
539 struct net_device_stats
*net_stats
= &interface
->netdev
->stats
;
540 struct fm10k_hw
*hw
= &interface
->hw
;
541 u64 hw_csum_tx_good
= 0, hw_csum_rx_good
= 0, rx_length_errors
= 0;
542 u64 rx_switch_errors
= 0, rx_drops
= 0, rx_pp_errors
= 0;
543 u64 rx_link_errors
= 0;
544 u64 rx_errors
= 0, rx_csum_errors
= 0, tx_csum_errors
= 0;
545 u64 restart_queue
= 0, tx_busy
= 0, alloc_failed
= 0;
546 u64 rx_bytes_nic
= 0, rx_pkts_nic
= 0, rx_drops_nic
= 0;
547 u64 tx_bytes_nic
= 0, tx_pkts_nic
= 0;
551 /* ensure only one thread updates stats at a time */
552 if (test_and_set_bit(__FM10K_UPDATING_STATS
, interface
->state
))
555 /* do not allow stats update via service task for next second */
556 interface
->next_stats_update
= jiffies
+ HZ
;
558 /* gather some stats to the interface struct that are per queue */
559 for (bytes
= 0, pkts
= 0, i
= 0; i
< interface
->num_tx_queues
; i
++) {
560 struct fm10k_ring
*tx_ring
= READ_ONCE(interface
->tx_ring
[i
]);
565 restart_queue
+= tx_ring
->tx_stats
.restart_queue
;
566 tx_busy
+= tx_ring
->tx_stats
.tx_busy
;
567 tx_csum_errors
+= tx_ring
->tx_stats
.csum_err
;
568 bytes
+= tx_ring
->stats
.bytes
;
569 pkts
+= tx_ring
->stats
.packets
;
570 hw_csum_tx_good
+= tx_ring
->tx_stats
.csum_good
;
573 interface
->restart_queue
= restart_queue
;
574 interface
->tx_busy
= tx_busy
;
575 net_stats
->tx_bytes
= bytes
;
576 net_stats
->tx_packets
= pkts
;
577 interface
->tx_csum_errors
= tx_csum_errors
;
578 interface
->hw_csum_tx_good
= hw_csum_tx_good
;
580 /* gather some stats to the interface struct that are per queue */
581 for (bytes
= 0, pkts
= 0, i
= 0; i
< interface
->num_rx_queues
; i
++) {
582 struct fm10k_ring
*rx_ring
= READ_ONCE(interface
->rx_ring
[i
]);
587 bytes
+= rx_ring
->stats
.bytes
;
588 pkts
+= rx_ring
->stats
.packets
;
589 alloc_failed
+= rx_ring
->rx_stats
.alloc_failed
;
590 rx_csum_errors
+= rx_ring
->rx_stats
.csum_err
;
591 rx_errors
+= rx_ring
->rx_stats
.errors
;
592 hw_csum_rx_good
+= rx_ring
->rx_stats
.csum_good
;
593 rx_switch_errors
+= rx_ring
->rx_stats
.switch_errors
;
594 rx_drops
+= rx_ring
->rx_stats
.drops
;
595 rx_pp_errors
+= rx_ring
->rx_stats
.pp_errors
;
596 rx_link_errors
+= rx_ring
->rx_stats
.link_errors
;
597 rx_length_errors
+= rx_ring
->rx_stats
.length_errors
;
600 net_stats
->rx_bytes
= bytes
;
601 net_stats
->rx_packets
= pkts
;
602 interface
->alloc_failed
= alloc_failed
;
603 interface
->rx_csum_errors
= rx_csum_errors
;
604 interface
->hw_csum_rx_good
= hw_csum_rx_good
;
605 interface
->rx_switch_errors
= rx_switch_errors
;
606 interface
->rx_drops
= rx_drops
;
607 interface
->rx_pp_errors
= rx_pp_errors
;
608 interface
->rx_link_errors
= rx_link_errors
;
609 interface
->rx_length_errors
= rx_length_errors
;
611 hw
->mac
.ops
.update_hw_stats(hw
, &interface
->stats
);
613 for (i
= 0; i
< hw
->mac
.max_queues
; i
++) {
614 struct fm10k_hw_stats_q
*q
= &interface
->stats
.q
[i
];
616 tx_bytes_nic
+= q
->tx_bytes
.count
;
617 tx_pkts_nic
+= q
->tx_packets
.count
;
618 rx_bytes_nic
+= q
->rx_bytes
.count
;
619 rx_pkts_nic
+= q
->rx_packets
.count
;
620 rx_drops_nic
+= q
->rx_drops
.count
;
623 interface
->tx_bytes_nic
= tx_bytes_nic
;
624 interface
->tx_packets_nic
= tx_pkts_nic
;
625 interface
->rx_bytes_nic
= rx_bytes_nic
;
626 interface
->rx_packets_nic
= rx_pkts_nic
;
627 interface
->rx_drops_nic
= rx_drops_nic
;
629 /* Fill out the OS statistics structure */
630 net_stats
->rx_errors
= rx_errors
;
631 net_stats
->rx_dropped
= interface
->stats
.nodesc_drop
.count
;
633 /* Update VF statistics */
634 fm10k_iov_update_stats(interface
);
636 clear_bit(__FM10K_UPDATING_STATS
, interface
->state
);
640 * fm10k_watchdog_flush_tx - flush queues on host not ready
641 * @interface: pointer to the device interface structure
643 static void fm10k_watchdog_flush_tx(struct fm10k_intfc
*interface
)
645 int some_tx_pending
= 0;
648 /* nothing to do if carrier is up */
649 if (netif_carrier_ok(interface
->netdev
))
652 for (i
= 0; i
< interface
->num_tx_queues
; i
++) {
653 struct fm10k_ring
*tx_ring
= interface
->tx_ring
[i
];
655 if (tx_ring
->next_to_use
!= tx_ring
->next_to_clean
) {
661 /* We've lost link, so the controller stops DMA, but we've got
662 * queued Tx work that's never going to get done, so reset
663 * controller to flush Tx.
666 set_bit(FM10K_FLAG_RESET_REQUESTED
, interface
->flags
);
670 * fm10k_watchdog_subtask - check and bring link up
671 * @interface: pointer to the device interface structure
673 static void fm10k_watchdog_subtask(struct fm10k_intfc
*interface
)
675 /* if interface is down do nothing */
676 if (test_bit(__FM10K_DOWN
, interface
->state
) ||
677 test_bit(__FM10K_RESETTING
, interface
->state
))
680 if (interface
->host_ready
)
681 fm10k_watchdog_host_is_ready(interface
);
683 fm10k_watchdog_host_not_ready(interface
);
685 /* update stats only once every second */
686 if (time_is_before_jiffies(interface
->next_stats_update
))
687 fm10k_update_stats(interface
);
689 /* flush any uncompleted work */
690 fm10k_watchdog_flush_tx(interface
);
694 * fm10k_check_hang_subtask - check for hung queues and dropped interrupts
695 * @interface: pointer to the device interface structure
697 * This function serves two purposes. First it strobes the interrupt lines
698 * in order to make certain interrupts are occurring. Secondly it sets the
699 * bits needed to check for TX hangs. As a result we should immediately
700 * determine if a hang has occurred.
702 static void fm10k_check_hang_subtask(struct fm10k_intfc
*interface
)
704 /* If we're down or resetting, just bail */
705 if (test_bit(__FM10K_DOWN
, interface
->state
) ||
706 test_bit(__FM10K_RESETTING
, interface
->state
))
709 /* rate limit tx hang checks to only once every 2 seconds */
710 if (time_is_after_eq_jiffies(interface
->next_tx_hang_check
))
712 interface
->next_tx_hang_check
= jiffies
+ (2 * HZ
);
714 if (netif_carrier_ok(interface
->netdev
)) {
717 /* Force detection of hung controller */
718 for (i
= 0; i
< interface
->num_tx_queues
; i
++)
719 set_check_for_tx_hang(interface
->tx_ring
[i
]);
721 /* Rearm all in-use q_vectors for immediate firing */
722 for (i
= 0; i
< interface
->num_q_vectors
; i
++) {
723 struct fm10k_q_vector
*qv
= interface
->q_vector
[i
];
725 if (!qv
->tx
.count
&& !qv
->rx
.count
)
727 writel(FM10K_ITR_ENABLE
| FM10K_ITR_PENDING2
, qv
->itr
);
733 * fm10k_service_task - manages and runs subtasks
734 * @work: pointer to work_struct containing our data
736 static void fm10k_service_task(struct work_struct
*work
)
738 struct fm10k_intfc
*interface
;
740 interface
= container_of(work
, struct fm10k_intfc
, service_task
);
742 /* Check whether we're detached first */
743 fm10k_detach_subtask(interface
);
745 /* tasks run even when interface is down */
746 fm10k_mbx_subtask(interface
);
747 fm10k_reset_subtask(interface
);
749 /* tasks only run when interface is up */
750 fm10k_watchdog_subtask(interface
);
751 fm10k_check_hang_subtask(interface
);
753 /* release lock on service events to allow scheduling next event */
754 fm10k_service_event_complete(interface
);
758 * fm10k_macvlan_task - send queued MAC/VLAN requests to switch manager
759 * @work: pointer to work_struct containing our data
761 * This work item handles sending MAC/VLAN updates to the switch manager. When
762 * the interface is up, it will attempt to queue mailbox messages to the
763 * switch manager requesting updates for MAC/VLAN pairs. If the Tx fifo of the
764 * mailbox is full, it will reschedule itself to try again in a short while.
765 * This ensures that the driver does not overload the switch mailbox with too
766 * many simultaneous requests, causing an unnecessary reset.
768 static void fm10k_macvlan_task(struct work_struct
*work
)
770 struct fm10k_macvlan_request
*item
;
771 struct fm10k_intfc
*interface
;
772 struct delayed_work
*dwork
;
773 struct list_head
*requests
;
777 dwork
= to_delayed_work(work
);
778 interface
= container_of(dwork
, struct fm10k_intfc
, macvlan_task
);
780 requests
= &interface
->macvlan_requests
;
783 /* Pop the first item off the list */
784 spin_lock_irqsave(&interface
->macvlan_lock
, flags
);
785 item
= list_first_entry_or_null(requests
,
786 struct fm10k_macvlan_request
,
789 list_del_init(&item
->list
);
791 spin_unlock_irqrestore(&interface
->macvlan_lock
, flags
);
793 /* We have no more items to process */
797 fm10k_mbx_lock(interface
);
799 /* Check that we have plenty of space to send the message. We
800 * want to ensure that the mailbox stays low enough to avoid a
801 * change in the host state, otherwise we may see spurious
802 * link up / link down notifications.
804 if (!hw
->mbx
.ops
.tx_ready(&hw
->mbx
, FM10K_VFMBX_MSG_MTU
+ 5)) {
805 hw
->mbx
.ops
.process(hw
, &hw
->mbx
);
806 set_bit(__FM10K_MACVLAN_REQUEST
, interface
->state
);
807 fm10k_mbx_unlock(interface
);
809 /* Put the request back on the list */
810 spin_lock_irqsave(&interface
->macvlan_lock
, flags
);
811 list_add(&item
->list
, requests
);
812 spin_unlock_irqrestore(&interface
->macvlan_lock
, flags
);
816 switch (item
->type
) {
817 case FM10K_MC_MAC_REQUEST
:
818 hw
->mac
.ops
.update_mc_addr(hw
,
824 case FM10K_UC_MAC_REQUEST
:
825 hw
->mac
.ops
.update_uc_addr(hw
,
832 case FM10K_VLAN_REQUEST
:
833 hw
->mac
.ops
.update_vlan(hw
,
842 fm10k_mbx_unlock(interface
);
844 /* Free the item now that we've sent the update */
849 WARN_ON(!test_bit(__FM10K_MACVLAN_SCHED
, interface
->state
));
851 /* flush memory to make sure state is correct */
852 smp_mb__before_atomic();
853 clear_bit(__FM10K_MACVLAN_SCHED
, interface
->state
);
855 /* If a MAC/VLAN request was scheduled since we started, we should
856 * re-schedule. However, there is no reason to re-schedule if there is
859 if (test_bit(__FM10K_MACVLAN_REQUEST
, interface
->state
))
860 fm10k_macvlan_schedule(interface
);
864 * fm10k_configure_tx_ring - Configure Tx ring after Reset
865 * @interface: board private structure
866 * @ring: structure containing ring specific data
868 * Configure the Tx descriptor ring after a reset.
870 static void fm10k_configure_tx_ring(struct fm10k_intfc
*interface
,
871 struct fm10k_ring
*ring
)
873 struct fm10k_hw
*hw
= &interface
->hw
;
874 u64 tdba
= ring
->dma
;
875 u32 size
= ring
->count
* sizeof(struct fm10k_tx_desc
);
876 u32 txint
= FM10K_INT_MAP_DISABLE
;
877 u32 txdctl
= BIT(FM10K_TXDCTL_MAX_TIME_SHIFT
) | FM10K_TXDCTL_ENABLE
;
878 u8 reg_idx
= ring
->reg_idx
;
880 /* disable queue to avoid issues while updating state */
881 fm10k_write_reg(hw
, FM10K_TXDCTL(reg_idx
), 0);
882 fm10k_write_flush(hw
);
884 /* possible poll here to verify ring resources have been cleaned */
886 /* set location and size for descriptor ring */
887 fm10k_write_reg(hw
, FM10K_TDBAL(reg_idx
), tdba
& DMA_BIT_MASK(32));
888 fm10k_write_reg(hw
, FM10K_TDBAH(reg_idx
), tdba
>> 32);
889 fm10k_write_reg(hw
, FM10K_TDLEN(reg_idx
), size
);
891 /* reset head and tail pointers */
892 fm10k_write_reg(hw
, FM10K_TDH(reg_idx
), 0);
893 fm10k_write_reg(hw
, FM10K_TDT(reg_idx
), 0);
895 /* store tail pointer */
896 ring
->tail
= &interface
->uc_addr
[FM10K_TDT(reg_idx
)];
898 /* reset ntu and ntc to place SW in sync with hardware */
899 ring
->next_to_clean
= 0;
900 ring
->next_to_use
= 0;
903 if (ring
->q_vector
) {
904 txint
= ring
->q_vector
->v_idx
+ NON_Q_VECTORS
;
905 txint
|= FM10K_INT_MAP_TIMER0
;
908 fm10k_write_reg(hw
, FM10K_TXINT(reg_idx
), txint
);
910 /* enable use of FTAG bit in Tx descriptor, register is RO for VF */
911 fm10k_write_reg(hw
, FM10K_PFVTCTL(reg_idx
),
912 FM10K_PFVTCTL_FTAG_DESC_ENABLE
);
915 if (!test_and_set_bit(__FM10K_TX_XPS_INIT_DONE
, ring
->state
) &&
917 netif_set_xps_queue(ring
->netdev
,
918 &ring
->q_vector
->affinity_mask
,
922 fm10k_write_reg(hw
, FM10K_TXDCTL(reg_idx
), txdctl
);
926 * fm10k_enable_tx_ring - Verify Tx ring is enabled after configuration
927 * @interface: board private structure
928 * @ring: structure containing ring specific data
930 * Verify the Tx descriptor ring is ready for transmit.
932 static void fm10k_enable_tx_ring(struct fm10k_intfc
*interface
,
933 struct fm10k_ring
*ring
)
935 struct fm10k_hw
*hw
= &interface
->hw
;
938 u8 reg_idx
= ring
->reg_idx
;
940 /* if we are already enabled just exit */
941 if (fm10k_read_reg(hw
, FM10K_TXDCTL(reg_idx
)) & FM10K_TXDCTL_ENABLE
)
944 /* poll to verify queue is enabled */
946 usleep_range(1000, 2000);
947 txdctl
= fm10k_read_reg(hw
, FM10K_TXDCTL(reg_idx
));
948 } while (!(txdctl
& FM10K_TXDCTL_ENABLE
) && --wait_loop
);
950 netif_err(interface
, drv
, interface
->netdev
,
951 "Could not enable Tx Queue %d\n", reg_idx
);
955 * fm10k_configure_tx - Configure Transmit Unit after Reset
956 * @interface: board private structure
958 * Configure the Tx unit of the MAC after a reset.
960 static void fm10k_configure_tx(struct fm10k_intfc
*interface
)
964 /* Setup the HW Tx Head and Tail descriptor pointers */
965 for (i
= 0; i
< interface
->num_tx_queues
; i
++)
966 fm10k_configure_tx_ring(interface
, interface
->tx_ring
[i
]);
968 /* poll here to verify that Tx rings are now enabled */
969 for (i
= 0; i
< interface
->num_tx_queues
; i
++)
970 fm10k_enable_tx_ring(interface
, interface
->tx_ring
[i
]);
974 * fm10k_configure_rx_ring - Configure Rx ring after Reset
975 * @interface: board private structure
976 * @ring: structure containing ring specific data
978 * Configure the Rx descriptor ring after a reset.
980 static void fm10k_configure_rx_ring(struct fm10k_intfc
*interface
,
981 struct fm10k_ring
*ring
)
983 u64 rdba
= ring
->dma
;
984 struct fm10k_hw
*hw
= &interface
->hw
;
985 u32 size
= ring
->count
* sizeof(union fm10k_rx_desc
);
986 u32 rxqctl
, rxdctl
= FM10K_RXDCTL_WRITE_BACK_MIN_DELAY
;
987 u32 srrctl
= FM10K_SRRCTL_BUFFER_CHAINING_EN
;
988 u32 rxint
= FM10K_INT_MAP_DISABLE
;
989 u8 rx_pause
= interface
->rx_pause
;
990 u8 reg_idx
= ring
->reg_idx
;
992 /* disable queue to avoid issues while updating state */
993 rxqctl
= fm10k_read_reg(hw
, FM10K_RXQCTL(reg_idx
));
994 rxqctl
&= ~FM10K_RXQCTL_ENABLE
;
995 fm10k_write_reg(hw
, FM10K_RXQCTL(reg_idx
), rxqctl
);
996 fm10k_write_flush(hw
);
998 /* possible poll here to verify ring resources have been cleaned */
1000 /* set location and size for descriptor ring */
1001 fm10k_write_reg(hw
, FM10K_RDBAL(reg_idx
), rdba
& DMA_BIT_MASK(32));
1002 fm10k_write_reg(hw
, FM10K_RDBAH(reg_idx
), rdba
>> 32);
1003 fm10k_write_reg(hw
, FM10K_RDLEN(reg_idx
), size
);
1005 /* reset head and tail pointers */
1006 fm10k_write_reg(hw
, FM10K_RDH(reg_idx
), 0);
1007 fm10k_write_reg(hw
, FM10K_RDT(reg_idx
), 0);
1009 /* store tail pointer */
1010 ring
->tail
= &interface
->uc_addr
[FM10K_RDT(reg_idx
)];
1012 /* reset ntu and ntc to place SW in sync with hardware */
1013 ring
->next_to_clean
= 0;
1014 ring
->next_to_use
= 0;
1015 ring
->next_to_alloc
= 0;
1017 /* Configure the Rx buffer size for one buff without split */
1018 srrctl
|= FM10K_RX_BUFSZ
>> FM10K_SRRCTL_BSIZEPKT_SHIFT
;
1020 /* Configure the Rx ring to suppress loopback packets */
1021 srrctl
|= FM10K_SRRCTL_LOOPBACK_SUPPRESS
;
1022 fm10k_write_reg(hw
, FM10K_SRRCTL(reg_idx
), srrctl
);
1024 /* Enable drop on empty */
1026 if (interface
->pfc_en
)
1027 rx_pause
= interface
->pfc_en
;
1029 if (!(rx_pause
& BIT(ring
->qos_pc
)))
1030 rxdctl
|= FM10K_RXDCTL_DROP_ON_EMPTY
;
1032 fm10k_write_reg(hw
, FM10K_RXDCTL(reg_idx
), rxdctl
);
1034 /* assign default VLAN to queue */
1035 ring
->vid
= hw
->mac
.default_vid
;
1037 /* if we have an active VLAN, disable default VLAN ID */
1038 if (test_bit(hw
->mac
.default_vid
, interface
->active_vlans
))
1039 ring
->vid
|= FM10K_VLAN_CLEAR
;
1042 if (ring
->q_vector
) {
1043 rxint
= ring
->q_vector
->v_idx
+ NON_Q_VECTORS
;
1044 rxint
|= FM10K_INT_MAP_TIMER1
;
1047 fm10k_write_reg(hw
, FM10K_RXINT(reg_idx
), rxint
);
1050 rxqctl
= fm10k_read_reg(hw
, FM10K_RXQCTL(reg_idx
));
1051 rxqctl
|= FM10K_RXQCTL_ENABLE
;
1052 fm10k_write_reg(hw
, FM10K_RXQCTL(reg_idx
), rxqctl
);
1054 /* place buffers on ring for receive data */
1055 fm10k_alloc_rx_buffers(ring
, fm10k_desc_unused(ring
));
1059 * fm10k_update_rx_drop_en - Configures the drop enable bits for Rx rings
1060 * @interface: board private structure
1062 * Configure the drop enable bits for the Rx rings.
1064 void fm10k_update_rx_drop_en(struct fm10k_intfc
*interface
)
1066 struct fm10k_hw
*hw
= &interface
->hw
;
1067 u8 rx_pause
= interface
->rx_pause
;
1071 if (interface
->pfc_en
)
1072 rx_pause
= interface
->pfc_en
;
1075 for (i
= 0; i
< interface
->num_rx_queues
; i
++) {
1076 struct fm10k_ring
*ring
= interface
->rx_ring
[i
];
1077 u32 rxdctl
= FM10K_RXDCTL_WRITE_BACK_MIN_DELAY
;
1078 u8 reg_idx
= ring
->reg_idx
;
1080 if (!(rx_pause
& BIT(ring
->qos_pc
)))
1081 rxdctl
|= FM10K_RXDCTL_DROP_ON_EMPTY
;
1083 fm10k_write_reg(hw
, FM10K_RXDCTL(reg_idx
), rxdctl
);
1088 * fm10k_configure_dglort - Configure Receive DGLORT after reset
1089 * @interface: board private structure
1091 * Configure the DGLORT description and RSS tables.
1093 static void fm10k_configure_dglort(struct fm10k_intfc
*interface
)
1095 struct fm10k_dglort_cfg dglort
= { 0 };
1096 struct fm10k_hw
*hw
= &interface
->hw
;
1100 /* Fill out hash function seeds */
1101 for (i
= 0; i
< FM10K_RSSRK_SIZE
; i
++)
1102 fm10k_write_reg(hw
, FM10K_RSSRK(0, i
), interface
->rssrk
[i
]);
1104 /* Write RETA table to hardware */
1105 for (i
= 0; i
< FM10K_RETA_SIZE
; i
++)
1106 fm10k_write_reg(hw
, FM10K_RETA(0, i
), interface
->reta
[i
]);
1108 /* Generate RSS hash based on packet types, TCP/UDP
1109 * port numbers and/or IPv4/v6 src and dst addresses
1111 mrqc
= FM10K_MRQC_IPV4
|
1112 FM10K_MRQC_TCP_IPV4
|
1114 FM10K_MRQC_TCP_IPV6
;
1116 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV4_UDP
, interface
->flags
))
1117 mrqc
|= FM10K_MRQC_UDP_IPV4
;
1118 if (test_bit(FM10K_FLAG_RSS_FIELD_IPV6_UDP
, interface
->flags
))
1119 mrqc
|= FM10K_MRQC_UDP_IPV6
;
1121 fm10k_write_reg(hw
, FM10K_MRQC(0), mrqc
);
1123 /* configure default DGLORT mapping for RSS/DCB */
1124 dglort
.inner_rss
= 1;
1125 dglort
.rss_l
= fls(interface
->ring_feature
[RING_F_RSS
].mask
);
1126 dglort
.pc_l
= fls(interface
->ring_feature
[RING_F_QOS
].mask
);
1127 hw
->mac
.ops
.configure_dglort_map(hw
, &dglort
);
1129 /* assign GLORT per queue for queue mapped testing */
1130 if (interface
->glort_count
> 64) {
1131 memset(&dglort
, 0, sizeof(dglort
));
1132 dglort
.inner_rss
= 1;
1133 dglort
.glort
= interface
->glort
+ 64;
1134 dglort
.idx
= fm10k_dglort_pf_queue
;
1135 dglort
.queue_l
= fls(interface
->num_rx_queues
- 1);
1136 hw
->mac
.ops
.configure_dglort_map(hw
, &dglort
);
1139 /* assign glort value for RSS/DCB specific to this interface */
1140 memset(&dglort
, 0, sizeof(dglort
));
1141 dglort
.inner_rss
= 1;
1142 dglort
.glort
= interface
->glort
;
1143 dglort
.rss_l
= fls(interface
->ring_feature
[RING_F_RSS
].mask
);
1144 dglort
.pc_l
= fls(interface
->ring_feature
[RING_F_QOS
].mask
);
1145 /* configure DGLORT mapping for RSS/DCB */
1146 dglort
.idx
= fm10k_dglort_pf_rss
;
1147 if (interface
->l2_accel
)
1148 dglort
.shared_l
= fls(interface
->l2_accel
->size
);
1149 hw
->mac
.ops
.configure_dglort_map(hw
, &dglort
);
1153 * fm10k_configure_rx - Configure Receive Unit after Reset
1154 * @interface: board private structure
1156 * Configure the Rx unit of the MAC after a reset.
1158 static void fm10k_configure_rx(struct fm10k_intfc
*interface
)
1162 /* Configure SWPRI to PC map */
1163 fm10k_configure_swpri_map(interface
);
1165 /* Configure RSS and DGLORT map */
1166 fm10k_configure_dglort(interface
);
1168 /* Setup the HW Rx Head and Tail descriptor pointers */
1169 for (i
= 0; i
< interface
->num_rx_queues
; i
++)
1170 fm10k_configure_rx_ring(interface
, interface
->rx_ring
[i
]);
1172 /* possible poll here to verify that Rx rings are now enabled */
1175 static void fm10k_napi_enable_all(struct fm10k_intfc
*interface
)
1177 struct fm10k_q_vector
*q_vector
;
1180 for (q_idx
= 0; q_idx
< interface
->num_q_vectors
; q_idx
++) {
1181 q_vector
= interface
->q_vector
[q_idx
];
1182 napi_enable(&q_vector
->napi
);
1186 static irqreturn_t
fm10k_msix_clean_rings(int __always_unused irq
, void *data
)
1188 struct fm10k_q_vector
*q_vector
= data
;
1190 if (q_vector
->rx
.count
|| q_vector
->tx
.count
)
1191 napi_schedule_irqoff(&q_vector
->napi
);
1196 static irqreturn_t
fm10k_msix_mbx_vf(int __always_unused irq
, void *data
)
1198 struct fm10k_intfc
*interface
= data
;
1199 struct fm10k_hw
*hw
= &interface
->hw
;
1200 struct fm10k_mbx_info
*mbx
= &hw
->mbx
;
1202 /* re-enable mailbox interrupt and indicate 20us delay */
1203 fm10k_write_reg(hw
, FM10K_VFITR(FM10K_MBX_VECTOR
),
1204 (FM10K_MBX_INT_DELAY
>> hw
->mac
.itr_scale
) |
1207 /* service upstream mailbox */
1208 if (fm10k_mbx_trylock(interface
)) {
1209 mbx
->ops
.process(hw
, mbx
);
1210 fm10k_mbx_unlock(interface
);
1213 hw
->mac
.get_host_state
= true;
1214 fm10k_service_event_schedule(interface
);
1219 #define FM10K_ERR_MSG(type) case (type): error = #type; break
1220 static void fm10k_handle_fault(struct fm10k_intfc
*interface
, int type
,
1221 struct fm10k_fault
*fault
)
1223 struct pci_dev
*pdev
= interface
->pdev
;
1224 struct fm10k_hw
*hw
= &interface
->hw
;
1225 struct fm10k_iov_data
*iov_data
= interface
->iov_data
;
1229 case FM10K_PCA_FAULT
:
1230 switch (fault
->type
) {
1232 error
= "Unknown PCA error";
1234 FM10K_ERR_MSG(PCA_NO_FAULT
);
1235 FM10K_ERR_MSG(PCA_UNMAPPED_ADDR
);
1236 FM10K_ERR_MSG(PCA_BAD_QACCESS_PF
);
1237 FM10K_ERR_MSG(PCA_BAD_QACCESS_VF
);
1238 FM10K_ERR_MSG(PCA_MALICIOUS_REQ
);
1239 FM10K_ERR_MSG(PCA_POISONED_TLP
);
1240 FM10K_ERR_MSG(PCA_TLP_ABORT
);
1243 case FM10K_THI_FAULT
:
1244 switch (fault
->type
) {
1246 error
= "Unknown THI error";
1248 FM10K_ERR_MSG(THI_NO_FAULT
);
1249 FM10K_ERR_MSG(THI_MAL_DIS_Q_FAULT
);
1252 case FM10K_FUM_FAULT
:
1253 switch (fault
->type
) {
1255 error
= "Unknown FUM error";
1257 FM10K_ERR_MSG(FUM_NO_FAULT
);
1258 FM10K_ERR_MSG(FUM_UNMAPPED_ADDR
);
1259 FM10K_ERR_MSG(FUM_BAD_VF_QACCESS
);
1260 FM10K_ERR_MSG(FUM_ADD_DECODE_ERR
);
1261 FM10K_ERR_MSG(FUM_RO_ERROR
);
1262 FM10K_ERR_MSG(FUM_QPRC_CRC_ERROR
);
1263 FM10K_ERR_MSG(FUM_CSR_TIMEOUT
);
1264 FM10K_ERR_MSG(FUM_INVALID_TYPE
);
1265 FM10K_ERR_MSG(FUM_INVALID_LENGTH
);
1266 FM10K_ERR_MSG(FUM_INVALID_BE
);
1267 FM10K_ERR_MSG(FUM_INVALID_ALIGN
);
1271 error
= "Undocumented fault";
1275 dev_warn(&pdev
->dev
,
1276 "%s Address: 0x%llx SpecInfo: 0x%x Func: %02x.%0x\n",
1277 error
, fault
->address
, fault
->specinfo
,
1278 PCI_SLOT(fault
->func
), PCI_FUNC(fault
->func
));
1280 /* For VF faults, clear out the respective LPORT, reset the queue
1281 * resources, and then reconnect to the mailbox. This allows the
1282 * VF in question to resume behavior. For transient faults that are
1283 * the result of non-malicious behavior this will log the fault and
1284 * allow the VF to resume functionality. Obviously for malicious VFs
1285 * they will be able to attempt malicious behavior again. In this
1286 * case, the system administrator will need to step in and manually
1287 * remove or disable the VF in question.
1289 if (fault
->func
&& iov_data
) {
1290 int vf
= fault
->func
- 1;
1291 struct fm10k_vf_info
*vf_info
= &iov_data
->vf_info
[vf
];
1293 hw
->iov
.ops
.reset_lport(hw
, vf_info
);
1294 hw
->iov
.ops
.reset_resources(hw
, vf_info
);
1296 /* reset_lport disables the VF, so re-enable it */
1297 hw
->iov
.ops
.set_lport(hw
, vf_info
, vf
,
1298 FM10K_VF_FLAG_MULTI_CAPABLE
);
1300 /* reset_resources will disconnect from the mbx */
1301 vf_info
->mbx
.ops
.connect(hw
, &vf_info
->mbx
);
1305 static void fm10k_report_fault(struct fm10k_intfc
*interface
, u32 eicr
)
1307 struct fm10k_hw
*hw
= &interface
->hw
;
1308 struct fm10k_fault fault
= { 0 };
1311 for (eicr
&= FM10K_EICR_FAULT_MASK
, type
= FM10K_PCA_FAULT
;
1313 eicr
>>= 1, type
+= FM10K_FAULT_SIZE
) {
1314 /* only check if there is an error reported */
1318 /* retrieve fault info */
1319 err
= hw
->mac
.ops
.get_fault(hw
, type
, &fault
);
1321 dev_err(&interface
->pdev
->dev
,
1322 "error reading fault\n");
1326 fm10k_handle_fault(interface
, type
, &fault
);
1330 static void fm10k_reset_drop_on_empty(struct fm10k_intfc
*interface
, u32 eicr
)
1332 struct fm10k_hw
*hw
= &interface
->hw
;
1333 const u32 rxdctl
= FM10K_RXDCTL_WRITE_BACK_MIN_DELAY
;
1337 if (!(eicr
& FM10K_EICR_MAXHOLDTIME
))
1340 maxholdq
= fm10k_read_reg(hw
, FM10K_MAXHOLDQ(7));
1342 fm10k_write_reg(hw
, FM10K_MAXHOLDQ(7), maxholdq
);
1344 if (maxholdq
& BIT(31)) {
1345 if (q
< FM10K_MAX_QUEUES_PF
) {
1346 interface
->rx_overrun_pf
++;
1347 fm10k_write_reg(hw
, FM10K_RXDCTL(q
), rxdctl
);
1349 interface
->rx_overrun_vf
++;
1363 maxholdq
= fm10k_read_reg(hw
, FM10K_MAXHOLDQ(q
/ 32));
1365 fm10k_write_reg(hw
, FM10K_MAXHOLDQ(q
/ 32), maxholdq
);
1369 static irqreturn_t
fm10k_msix_mbx_pf(int __always_unused irq
, void *data
)
1371 struct fm10k_intfc
*interface
= data
;
1372 struct fm10k_hw
*hw
= &interface
->hw
;
1373 struct fm10k_mbx_info
*mbx
= &hw
->mbx
;
1377 /* unmask any set bits related to this interrupt */
1378 eicr
= fm10k_read_reg(hw
, FM10K_EICR
);
1379 fm10k_write_reg(hw
, FM10K_EICR
, eicr
& (FM10K_EICR_MAILBOX
|
1380 FM10K_EICR_SWITCHREADY
|
1381 FM10K_EICR_SWITCHNOTREADY
));
1383 /* report any faults found to the message log */
1384 fm10k_report_fault(interface
, eicr
);
1386 /* reset any queues disabled due to receiver overrun */
1387 fm10k_reset_drop_on_empty(interface
, eicr
);
1389 /* service mailboxes */
1390 if (fm10k_mbx_trylock(interface
)) {
1391 err
= mbx
->ops
.process(hw
, mbx
);
1392 /* handle VFLRE events */
1393 fm10k_iov_event(interface
);
1394 fm10k_mbx_unlock(interface
);
1397 if (err
== FM10K_ERR_RESET_REQUESTED
)
1398 set_bit(FM10K_FLAG_RESET_REQUESTED
, interface
->flags
);
1400 /* if switch toggled state we should reset GLORTs */
1401 if (eicr
& FM10K_EICR_SWITCHNOTREADY
) {
1402 /* force link down for at least 4 seconds */
1403 interface
->link_down_event
= jiffies
+ (4 * HZ
);
1404 set_bit(__FM10K_LINK_DOWN
, interface
->state
);
1406 /* reset dglort_map back to no config */
1407 hw
->mac
.dglort_map
= FM10K_DGLORTMAP_NONE
;
1410 /* we should validate host state after interrupt event */
1411 hw
->mac
.get_host_state
= true;
1413 /* validate host state, and handle VF mailboxes in the service task */
1414 fm10k_service_event_schedule(interface
);
1416 /* re-enable mailbox interrupt and indicate 20us delay */
1417 fm10k_write_reg(hw
, FM10K_ITR(FM10K_MBX_VECTOR
),
1418 (FM10K_MBX_INT_DELAY
>> hw
->mac
.itr_scale
) |
1424 void fm10k_mbx_free_irq(struct fm10k_intfc
*interface
)
1426 struct fm10k_hw
*hw
= &interface
->hw
;
1427 struct msix_entry
*entry
;
1430 /* no mailbox IRQ to free if MSI-X is not enabled */
1431 if (!interface
->msix_entries
)
1434 entry
= &interface
->msix_entries
[FM10K_MBX_VECTOR
];
1436 /* disconnect the mailbox */
1437 hw
->mbx
.ops
.disconnect(hw
, &hw
->mbx
);
1439 /* disable Mailbox cause */
1440 if (hw
->mac
.type
== fm10k_mac_pf
) {
1441 fm10k_write_reg(hw
, FM10K_EIMR
,
1442 FM10K_EIMR_DISABLE(PCA_FAULT
) |
1443 FM10K_EIMR_DISABLE(FUM_FAULT
) |
1444 FM10K_EIMR_DISABLE(MAILBOX
) |
1445 FM10K_EIMR_DISABLE(SWITCHREADY
) |
1446 FM10K_EIMR_DISABLE(SWITCHNOTREADY
) |
1447 FM10K_EIMR_DISABLE(SRAMERROR
) |
1448 FM10K_EIMR_DISABLE(VFLR
) |
1449 FM10K_EIMR_DISABLE(MAXHOLDTIME
));
1450 itr_reg
= FM10K_ITR(FM10K_MBX_VECTOR
);
1452 itr_reg
= FM10K_VFITR(FM10K_MBX_VECTOR
);
1455 fm10k_write_reg(hw
, itr_reg
, FM10K_ITR_MASK_SET
);
1457 free_irq(entry
->vector
, interface
);
1460 static s32
fm10k_mbx_mac_addr(struct fm10k_hw
*hw
, u32
**results
,
1461 struct fm10k_mbx_info
*mbx
)
1463 bool vlan_override
= hw
->mac
.vlan_override
;
1464 u16 default_vid
= hw
->mac
.default_vid
;
1465 struct fm10k_intfc
*interface
;
1468 err
= fm10k_msg_mac_vlan_vf(hw
, results
, mbx
);
1472 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1474 /* MAC was changed so we need reset */
1475 if (is_valid_ether_addr(hw
->mac
.perm_addr
) &&
1476 !ether_addr_equal(hw
->mac
.perm_addr
, hw
->mac
.addr
))
1477 set_bit(FM10K_FLAG_RESET_REQUESTED
, interface
->flags
);
1479 /* VLAN override was changed, or default VLAN changed */
1480 if ((vlan_override
!= hw
->mac
.vlan_override
) ||
1481 (default_vid
!= hw
->mac
.default_vid
))
1482 set_bit(FM10K_FLAG_RESET_REQUESTED
, interface
->flags
);
1487 /* generic error handler for mailbox issues */
1488 static s32
fm10k_mbx_error(struct fm10k_hw
*hw
, u32
**results
,
1489 struct fm10k_mbx_info __always_unused
*mbx
)
1491 struct fm10k_intfc
*interface
;
1492 struct pci_dev
*pdev
;
1494 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1495 pdev
= interface
->pdev
;
1497 dev_err(&pdev
->dev
, "Unknown message ID %u\n",
1498 **results
& FM10K_TLV_ID_MASK
);
1503 static const struct fm10k_msg_data vf_mbx_data
[] = {
1504 FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test
),
1505 FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr
),
1506 FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf
),
1507 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error
),
1510 static int fm10k_mbx_request_irq_vf(struct fm10k_intfc
*interface
)
1512 struct msix_entry
*entry
= &interface
->msix_entries
[FM10K_MBX_VECTOR
];
1513 struct net_device
*dev
= interface
->netdev
;
1514 struct fm10k_hw
*hw
= &interface
->hw
;
1517 /* Use timer0 for interrupt moderation on the mailbox */
1518 u32 itr
= entry
->entry
| FM10K_INT_MAP_TIMER0
;
1520 /* register mailbox handlers */
1521 err
= hw
->mbx
.ops
.register_handlers(&hw
->mbx
, vf_mbx_data
);
1525 /* request the IRQ */
1526 err
= request_irq(entry
->vector
, fm10k_msix_mbx_vf
, 0,
1527 dev
->name
, interface
);
1529 netif_err(interface
, probe
, dev
,
1530 "request_irq for msix_mbx failed: %d\n", err
);
1534 /* map all of the interrupt sources */
1535 fm10k_write_reg(hw
, FM10K_VFINT_MAP
, itr
);
1537 /* enable interrupt */
1538 fm10k_write_reg(hw
, FM10K_VFITR(entry
->entry
), FM10K_ITR_ENABLE
);
1543 static s32
fm10k_lport_map(struct fm10k_hw
*hw
, u32
**results
,
1544 struct fm10k_mbx_info
*mbx
)
1546 struct fm10k_intfc
*interface
;
1547 u32 dglort_map
= hw
->mac
.dglort_map
;
1550 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1552 err
= fm10k_msg_err_pf(hw
, results
, mbx
);
1553 if (!err
&& hw
->swapi
.status
) {
1554 /* force link down for a reasonable delay */
1555 interface
->link_down_event
= jiffies
+ (2 * HZ
);
1556 set_bit(__FM10K_LINK_DOWN
, interface
->state
);
1558 /* reset dglort_map back to no config */
1559 hw
->mac
.dglort_map
= FM10K_DGLORTMAP_NONE
;
1561 fm10k_service_event_schedule(interface
);
1563 /* prevent overloading kernel message buffer */
1564 if (interface
->lport_map_failed
)
1567 interface
->lport_map_failed
= true;
1569 if (hw
->swapi
.status
== FM10K_MSG_ERR_PEP_NOT_SCHEDULED
)
1570 dev_warn(&interface
->pdev
->dev
,
1571 "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
1572 dev_warn(&interface
->pdev
->dev
,
1573 "request logical port map failed: %d\n",
1579 err
= fm10k_msg_lport_map_pf(hw
, results
, mbx
);
1583 interface
->lport_map_failed
= false;
1585 /* we need to reset if port count was just updated */
1586 if (dglort_map
!= hw
->mac
.dglort_map
)
1587 set_bit(FM10K_FLAG_RESET_REQUESTED
, interface
->flags
);
1592 static s32
fm10k_update_pvid(struct fm10k_hw
*hw
, u32
**results
,
1593 struct fm10k_mbx_info __always_unused
*mbx
)
1595 struct fm10k_intfc
*interface
;
1600 err
= fm10k_tlv_attr_get_u32(results
[FM10K_PF_ATTR_ID_UPDATE_PVID
],
1605 /* extract values from the pvid update */
1606 glort
= FM10K_MSG_HDR_FIELD_GET(pvid_update
, UPDATE_PVID_GLORT
);
1607 pvid
= FM10K_MSG_HDR_FIELD_GET(pvid_update
, UPDATE_PVID_PVID
);
1609 /* if glort is not valid return error */
1610 if (!fm10k_glort_valid_pf(hw
, glort
))
1611 return FM10K_ERR_PARAM
;
1613 /* verify VLAN ID is valid */
1614 if (pvid
>= FM10K_VLAN_TABLE_VID_MAX
)
1615 return FM10K_ERR_PARAM
;
1617 interface
= container_of(hw
, struct fm10k_intfc
, hw
);
1619 /* check to see if this belongs to one of the VFs */
1620 err
= fm10k_iov_update_pvid(interface
, glort
, pvid
);
1624 /* we need to reset if default VLAN was just updated */
1625 if (pvid
!= hw
->mac
.default_vid
)
1626 set_bit(FM10K_FLAG_RESET_REQUESTED
, interface
->flags
);
1628 hw
->mac
.default_vid
= pvid
;
1633 static const struct fm10k_msg_data pf_mbx_data
[] = {
1634 FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES
, fm10k_msg_err_pf
),
1635 FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE
, fm10k_msg_err_pf
),
1636 FM10K_PF_MSG_LPORT_MAP_HANDLER(fm10k_lport_map
),
1637 FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE
, fm10k_msg_err_pf
),
1638 FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE
, fm10k_msg_err_pf
),
1639 FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid
),
1640 FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error
),
1643 static int fm10k_mbx_request_irq_pf(struct fm10k_intfc
*interface
)
1645 struct msix_entry
*entry
= &interface
->msix_entries
[FM10K_MBX_VECTOR
];
1646 struct net_device
*dev
= interface
->netdev
;
1647 struct fm10k_hw
*hw
= &interface
->hw
;
1650 /* Use timer0 for interrupt moderation on the mailbox */
1651 u32 mbx_itr
= entry
->entry
| FM10K_INT_MAP_TIMER0
;
1652 u32 other_itr
= entry
->entry
| FM10K_INT_MAP_IMMEDIATE
;
1654 /* register mailbox handlers */
1655 err
= hw
->mbx
.ops
.register_handlers(&hw
->mbx
, pf_mbx_data
);
1659 /* request the IRQ */
1660 err
= request_irq(entry
->vector
, fm10k_msix_mbx_pf
, 0,
1661 dev
->name
, interface
);
1663 netif_err(interface
, probe
, dev
,
1664 "request_irq for msix_mbx failed: %d\n", err
);
1668 /* Enable interrupts w/ no moderation for "other" interrupts */
1669 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_pcie_fault
), other_itr
);
1670 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_switch_up_down
), other_itr
);
1671 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_sram
), other_itr
);
1672 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_max_hold_time
), other_itr
);
1673 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_vflr
), other_itr
);
1675 /* Enable interrupts w/ moderation for mailbox */
1676 fm10k_write_reg(hw
, FM10K_INT_MAP(fm10k_int_mailbox
), mbx_itr
);
1678 /* Enable individual interrupt causes */
1679 fm10k_write_reg(hw
, FM10K_EIMR
, FM10K_EIMR_ENABLE(PCA_FAULT
) |
1680 FM10K_EIMR_ENABLE(FUM_FAULT
) |
1681 FM10K_EIMR_ENABLE(MAILBOX
) |
1682 FM10K_EIMR_ENABLE(SWITCHREADY
) |
1683 FM10K_EIMR_ENABLE(SWITCHNOTREADY
) |
1684 FM10K_EIMR_ENABLE(SRAMERROR
) |
1685 FM10K_EIMR_ENABLE(VFLR
) |
1686 FM10K_EIMR_ENABLE(MAXHOLDTIME
));
1688 /* enable interrupt */
1689 fm10k_write_reg(hw
, FM10K_ITR(entry
->entry
), FM10K_ITR_ENABLE
);
1694 int fm10k_mbx_request_irq(struct fm10k_intfc
*interface
)
1696 struct fm10k_hw
*hw
= &interface
->hw
;
1699 /* enable Mailbox cause */
1700 if (hw
->mac
.type
== fm10k_mac_pf
)
1701 err
= fm10k_mbx_request_irq_pf(interface
);
1703 err
= fm10k_mbx_request_irq_vf(interface
);
1707 /* connect mailbox */
1708 err
= hw
->mbx
.ops
.connect(hw
, &hw
->mbx
);
1710 /* if the mailbox failed to connect, then free IRQ */
1712 fm10k_mbx_free_irq(interface
);
1718 * fm10k_qv_free_irq - release interrupts associated with queue vectors
1719 * @interface: board private structure
1721 * Release all interrupts associated with this interface
1723 void fm10k_qv_free_irq(struct fm10k_intfc
*interface
)
1725 int vector
= interface
->num_q_vectors
;
1726 struct msix_entry
*entry
;
1728 entry
= &interface
->msix_entries
[NON_Q_VECTORS
+ vector
];
1731 struct fm10k_q_vector
*q_vector
;
1735 q_vector
= interface
->q_vector
[vector
];
1737 if (!q_vector
->tx
.count
&& !q_vector
->rx
.count
)
1740 /* clear the affinity_mask in the IRQ descriptor */
1741 irq_set_affinity_hint(entry
->vector
, NULL
);
1743 /* disable interrupts */
1744 writel(FM10K_ITR_MASK_SET
, q_vector
->itr
);
1746 free_irq(entry
->vector
, q_vector
);
1751 * fm10k_qv_request_irq - initialize interrupts for queue vectors
1752 * @interface: board private structure
1754 * Attempts to configure interrupts using the best available
1755 * capabilities of the hardware and kernel.
1757 int fm10k_qv_request_irq(struct fm10k_intfc
*interface
)
1759 struct net_device
*dev
= interface
->netdev
;
1760 struct fm10k_hw
*hw
= &interface
->hw
;
1761 struct msix_entry
*entry
;
1762 unsigned int ri
= 0, ti
= 0;
1765 entry
= &interface
->msix_entries
[NON_Q_VECTORS
];
1767 for (vector
= 0; vector
< interface
->num_q_vectors
; vector
++) {
1768 struct fm10k_q_vector
*q_vector
= interface
->q_vector
[vector
];
1770 /* name the vector */
1771 if (q_vector
->tx
.count
&& q_vector
->rx
.count
) {
1772 snprintf(q_vector
->name
, sizeof(q_vector
->name
),
1773 "%s-TxRx-%u", dev
->name
, ri
++);
1775 } else if (q_vector
->rx
.count
) {
1776 snprintf(q_vector
->name
, sizeof(q_vector
->name
),
1777 "%s-rx-%u", dev
->name
, ri
++);
1778 } else if (q_vector
->tx
.count
) {
1779 snprintf(q_vector
->name
, sizeof(q_vector
->name
),
1780 "%s-tx-%u", dev
->name
, ti
++);
1782 /* skip this unused q_vector */
1786 /* Assign ITR register to q_vector */
1787 q_vector
->itr
= (hw
->mac
.type
== fm10k_mac_pf
) ?
1788 &interface
->uc_addr
[FM10K_ITR(entry
->entry
)] :
1789 &interface
->uc_addr
[FM10K_VFITR(entry
->entry
)];
1791 /* request the IRQ */
1792 err
= request_irq(entry
->vector
, &fm10k_msix_clean_rings
, 0,
1793 q_vector
->name
, q_vector
);
1795 netif_err(interface
, probe
, dev
,
1796 "request_irq failed for MSIX interrupt Error: %d\n",
1801 /* assign the mask for this irq */
1802 irq_set_affinity_hint(entry
->vector
, &q_vector
->affinity_mask
);
1804 /* Enable q_vector */
1805 writel(FM10K_ITR_ENABLE
, q_vector
->itr
);
1813 /* wind through the ring freeing all entries and vectors */
1815 struct fm10k_q_vector
*q_vector
;
1819 q_vector
= interface
->q_vector
[vector
];
1821 if (!q_vector
->tx
.count
&& !q_vector
->rx
.count
)
1824 /* clear the affinity_mask in the IRQ descriptor */
1825 irq_set_affinity_hint(entry
->vector
, NULL
);
1827 /* disable interrupts */
1828 writel(FM10K_ITR_MASK_SET
, q_vector
->itr
);
1830 free_irq(entry
->vector
, q_vector
);
1836 void fm10k_up(struct fm10k_intfc
*interface
)
1838 struct fm10k_hw
*hw
= &interface
->hw
;
1840 /* Enable Tx/Rx DMA */
1841 hw
->mac
.ops
.start_hw(hw
);
1843 /* configure Tx descriptor rings */
1844 fm10k_configure_tx(interface
);
1846 /* configure Rx descriptor rings */
1847 fm10k_configure_rx(interface
);
1849 /* configure interrupts */
1850 hw
->mac
.ops
.update_int_moderator(hw
);
1852 /* enable statistics capture again */
1853 clear_bit(__FM10K_UPDATING_STATS
, interface
->state
);
1855 /* clear down bit to indicate we are ready to go */
1856 clear_bit(__FM10K_DOWN
, interface
->state
);
1858 /* enable polling cleanups */
1859 fm10k_napi_enable_all(interface
);
1861 /* re-establish Rx filters */
1862 fm10k_restore_rx_state(interface
);
1864 /* enable transmits */
1865 netif_tx_start_all_queues(interface
->netdev
);
1867 /* kick off the service timer now */
1868 hw
->mac
.get_host_state
= true;
1869 mod_timer(&interface
->service_timer
, jiffies
);
1872 static void fm10k_napi_disable_all(struct fm10k_intfc
*interface
)
1874 struct fm10k_q_vector
*q_vector
;
1877 for (q_idx
= 0; q_idx
< interface
->num_q_vectors
; q_idx
++) {
1878 q_vector
= interface
->q_vector
[q_idx
];
1879 napi_disable(&q_vector
->napi
);
1883 void fm10k_down(struct fm10k_intfc
*interface
)
1885 struct net_device
*netdev
= interface
->netdev
;
1886 struct fm10k_hw
*hw
= &interface
->hw
;
1887 int err
, i
= 0, count
= 0;
1889 /* signal that we are down to the interrupt handler and service task */
1890 if (test_and_set_bit(__FM10K_DOWN
, interface
->state
))
1893 /* call carrier off first to avoid false dev_watchdog timeouts */
1894 netif_carrier_off(netdev
);
1896 /* disable transmits */
1897 netif_tx_stop_all_queues(netdev
);
1898 netif_tx_disable(netdev
);
1900 /* reset Rx filters */
1901 fm10k_reset_rx_state(interface
);
1903 /* disable polling routines */
1904 fm10k_napi_disable_all(interface
);
1906 /* capture stats one last time before stopping interface */
1907 fm10k_update_stats(interface
);
1909 /* prevent updating statistics while we're down */
1910 while (test_and_set_bit(__FM10K_UPDATING_STATS
, interface
->state
))
1911 usleep_range(1000, 2000);
1913 /* skip waiting for TX DMA if we lost PCIe link */
1914 if (FM10K_REMOVED(hw
->hw_addr
))
1915 goto skip_tx_dma_drain
;
1917 /* In some rare circumstances it can take a while for Tx queues to
1918 * quiesce and be fully disabled. Attempt to .stop_hw() first, and
1919 * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
1920 * until the Tx queues have emptied, or until a number of retries. If
1921 * we fail to clear within the retry loop, we will issue a warning
1922 * indicating that Tx DMA is probably hung. Note this means we call
1923 * .stop_hw() twice but this shouldn't cause any problems.
1925 err
= hw
->mac
.ops
.stop_hw(hw
);
1926 if (err
!= FM10K_ERR_REQUESTS_PENDING
)
1927 goto skip_tx_dma_drain
;
1929 #define TX_DMA_DRAIN_RETRIES 25
1930 for (count
= 0; count
< TX_DMA_DRAIN_RETRIES
; count
++) {
1931 usleep_range(10000, 20000);
1933 /* start checking at the last ring to have pending Tx */
1934 for (; i
< interface
->num_tx_queues
; i
++)
1935 if (fm10k_get_tx_pending(interface
->tx_ring
[i
], false))
1938 /* if all the queues are drained, we can break now */
1939 if (i
== interface
->num_tx_queues
)
1943 if (count
>= TX_DMA_DRAIN_RETRIES
)
1944 dev_err(&interface
->pdev
->dev
,
1945 "Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
1948 /* Disable DMA engine for Tx/Rx */
1949 err
= hw
->mac
.ops
.stop_hw(hw
);
1950 if (err
== FM10K_ERR_REQUESTS_PENDING
)
1951 dev_err(&interface
->pdev
->dev
,
1952 "due to pending requests hw was not shut down gracefully\n");
1954 dev_err(&interface
->pdev
->dev
, "stop_hw failed: %d\n", err
);
1956 /* free any buffers still on the rings */
1957 fm10k_clean_all_tx_rings(interface
);
1958 fm10k_clean_all_rx_rings(interface
);
1962 * fm10k_sw_init - Initialize general software structures
1963 * @interface: host interface private structure to initialize
1964 * @ent: PCI device ID entry
1966 * fm10k_sw_init initializes the interface private data structure.
1967 * Fields are initialized based on PCI device information and
1968 * OS network device settings (MTU size).
1970 static int fm10k_sw_init(struct fm10k_intfc
*interface
,
1971 const struct pci_device_id
*ent
)
1973 const struct fm10k_info
*fi
= fm10k_info_tbl
[ent
->driver_data
];
1974 struct fm10k_hw
*hw
= &interface
->hw
;
1975 struct pci_dev
*pdev
= interface
->pdev
;
1976 struct net_device
*netdev
= interface
->netdev
;
1977 u32 rss_key
[FM10K_RSSRK_SIZE
];
1981 /* initialize back pointer */
1982 hw
->back
= interface
;
1983 hw
->hw_addr
= interface
->uc_addr
;
1985 /* PCI config space info */
1986 hw
->vendor_id
= pdev
->vendor
;
1987 hw
->device_id
= pdev
->device
;
1988 hw
->revision_id
= pdev
->revision
;
1989 hw
->subsystem_vendor_id
= pdev
->subsystem_vendor
;
1990 hw
->subsystem_device_id
= pdev
->subsystem_device
;
1993 memcpy(&hw
->mac
.ops
, fi
->mac_ops
, sizeof(hw
->mac
.ops
));
1994 hw
->mac
.type
= fi
->mac
;
1996 /* Setup IOV handlers */
1998 memcpy(&hw
->iov
.ops
, fi
->iov_ops
, sizeof(hw
->iov
.ops
));
2000 /* Set common capability flags and settings */
2001 rss
= min_t(int, FM10K_MAX_RSS_INDICES
, num_online_cpus());
2002 interface
->ring_feature
[RING_F_RSS
].limit
= rss
;
2003 fi
->get_invariants(hw
);
2005 /* pick up the PCIe bus settings for reporting later */
2006 if (hw
->mac
.ops
.get_bus_info
)
2007 hw
->mac
.ops
.get_bus_info(hw
);
2009 /* limit the usable DMA range */
2010 if (hw
->mac
.ops
.set_dma_mask
)
2011 hw
->mac
.ops
.set_dma_mask(hw
, dma_get_mask(&pdev
->dev
));
2013 /* update netdev with DMA restrictions */
2014 if (dma_get_mask(&pdev
->dev
) > DMA_BIT_MASK(32)) {
2015 netdev
->features
|= NETIF_F_HIGHDMA
;
2016 netdev
->vlan_features
|= NETIF_F_HIGHDMA
;
2019 /* reset and initialize the hardware so it is in a known state */
2020 err
= hw
->mac
.ops
.reset_hw(hw
);
2022 dev_err(&pdev
->dev
, "reset_hw failed: %d\n", err
);
2026 err
= hw
->mac
.ops
.init_hw(hw
);
2028 dev_err(&pdev
->dev
, "init_hw failed: %d\n", err
);
2032 /* initialize hardware statistics */
2033 hw
->mac
.ops
.update_hw_stats(hw
, &interface
->stats
);
2035 /* Set upper limit on IOV VFs that can be allocated */
2036 pci_sriov_set_totalvfs(pdev
, hw
->iov
.total_vfs
);
2038 /* Start with random Ethernet address */
2039 eth_random_addr(hw
->mac
.addr
);
2041 /* Initialize MAC address from hardware */
2042 err
= hw
->mac
.ops
.read_mac_addr(hw
);
2044 dev_warn(&pdev
->dev
,
2045 "Failed to obtain MAC address defaulting to random\n");
2046 /* tag address assignment as random */
2047 netdev
->addr_assign_type
|= NET_ADDR_RANDOM
;
2050 ether_addr_copy(netdev
->dev_addr
, hw
->mac
.addr
);
2051 ether_addr_copy(netdev
->perm_addr
, hw
->mac
.addr
);
2053 if (!is_valid_ether_addr(netdev
->perm_addr
)) {
2054 dev_err(&pdev
->dev
, "Invalid MAC Address\n");
2058 /* initialize DCBNL interface */
2059 fm10k_dcbnl_set_ops(netdev
);
2061 /* set default ring sizes */
2062 interface
->tx_ring_count
= FM10K_DEFAULT_TXD
;
2063 interface
->rx_ring_count
= FM10K_DEFAULT_RXD
;
2065 /* set default interrupt moderation */
2066 interface
->tx_itr
= FM10K_TX_ITR_DEFAULT
;
2067 interface
->rx_itr
= FM10K_ITR_ADAPTIVE
| FM10K_RX_ITR_DEFAULT
;
2069 /* initialize udp port lists */
2070 INIT_LIST_HEAD(&interface
->vxlan_port
);
2071 INIT_LIST_HEAD(&interface
->geneve_port
);
2073 /* Initialize the MAC/VLAN queue */
2074 INIT_LIST_HEAD(&interface
->macvlan_requests
);
2076 netdev_rss_key_fill(rss_key
, sizeof(rss_key
));
2077 memcpy(interface
->rssrk
, rss_key
, sizeof(rss_key
));
2079 /* Initialize the mailbox lock */
2080 spin_lock_init(&interface
->mbx_lock
);
2081 spin_lock_init(&interface
->macvlan_lock
);
2083 /* Start off interface as being down */
2084 set_bit(__FM10K_DOWN
, interface
->state
);
2085 set_bit(__FM10K_UPDATING_STATS
, interface
->state
);
2091 * fm10k_probe - Device Initialization Routine
2092 * @pdev: PCI device information struct
2093 * @ent: entry in fm10k_pci_tbl
2095 * Returns 0 on success, negative on failure
2097 * fm10k_probe initializes an interface identified by a pci_dev structure.
2098 * The OS initialization, configuring of the interface private structure,
2099 * and a hardware reset occur.
2101 static int fm10k_probe(struct pci_dev
*pdev
, const struct pci_device_id
*ent
)
2103 struct net_device
*netdev
;
2104 struct fm10k_intfc
*interface
;
2107 if (pdev
->error_state
!= pci_channel_io_normal
) {
2109 "PCI device still in an error state. Unable to load...\n");
2113 err
= pci_enable_device_mem(pdev
);
2116 "PCI enable device failed: %d\n", err
);
2120 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(48));
2122 err
= dma_set_mask_and_coherent(&pdev
->dev
, DMA_BIT_MASK(32));
2125 "DMA configuration failed: %d\n", err
);
2129 err
= pci_request_mem_regions(pdev
, fm10k_driver_name
);
2132 "pci_request_selected_regions failed: %d\n", err
);
2136 pci_enable_pcie_error_reporting(pdev
);
2138 pci_set_master(pdev
);
2139 pci_save_state(pdev
);
2141 netdev
= fm10k_alloc_netdev(fm10k_info_tbl
[ent
->driver_data
]);
2144 goto err_alloc_netdev
;
2147 SET_NETDEV_DEV(netdev
, &pdev
->dev
);
2149 interface
= netdev_priv(netdev
);
2150 pci_set_drvdata(pdev
, interface
);
2152 interface
->netdev
= netdev
;
2153 interface
->pdev
= pdev
;
2155 interface
->uc_addr
= ioremap(pci_resource_start(pdev
, 0),
2156 FM10K_UC_ADDR_SIZE
);
2157 if (!interface
->uc_addr
) {
2162 err
= fm10k_sw_init(interface
, ent
);
2166 /* enable debugfs support */
2167 fm10k_dbg_intfc_init(interface
);
2169 err
= fm10k_init_queueing_scheme(interface
);
2173 /* the mbx interrupt might attempt to schedule the service task, so we
2174 * must ensure it is disabled since we haven't yet requested the timer
2177 set_bit(__FM10K_SERVICE_DISABLE
, interface
->state
);
2179 err
= fm10k_mbx_request_irq(interface
);
2181 goto err_mbx_interrupt
;
2183 /* final check of hardware state before registering the interface */
2184 err
= fm10k_hw_ready(interface
);
2188 err
= register_netdev(netdev
);
2192 /* carrier off reporting is important to ethtool even BEFORE open */
2193 netif_carrier_off(netdev
);
2195 /* stop all the transmit queues from transmitting until link is up */
2196 netif_tx_stop_all_queues(netdev
);
2198 /* Initialize service timer and service task late in order to avoid
2201 timer_setup(&interface
->service_timer
, fm10k_service_timer
, 0);
2202 INIT_WORK(&interface
->service_task
, fm10k_service_task
);
2204 /* Setup the MAC/VLAN queue */
2205 INIT_DELAYED_WORK(&interface
->macvlan_task
, fm10k_macvlan_task
);
2207 /* kick off service timer now, even when interface is down */
2208 mod_timer(&interface
->service_timer
, (HZ
* 2) + jiffies
);
2210 /* print warning for non-optimal configurations */
2211 pcie_print_link_status(interface
->pdev
);
2213 /* report MAC address for logging */
2214 dev_info(&pdev
->dev
, "%pM\n", netdev
->dev_addr
);
2216 /* enable SR-IOV after registering netdev to enforce PF/VF ordering */
2217 fm10k_iov_configure(pdev
, 0);
2219 /* clear the service task disable bit and kick off service task */
2220 clear_bit(__FM10K_SERVICE_DISABLE
, interface
->state
);
2221 fm10k_service_event_schedule(interface
);
2226 fm10k_mbx_free_irq(interface
);
2228 fm10k_clear_queueing_scheme(interface
);
2230 if (interface
->sw_addr
)
2231 iounmap(interface
->sw_addr
);
2232 iounmap(interface
->uc_addr
);
2234 free_netdev(netdev
);
2236 pci_release_mem_regions(pdev
);
2239 pci_disable_device(pdev
);
/**
 * fm10k_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * fm10k_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void fm10k_remove(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	/* stop the service timer first so it cannot re-queue work while
	 * the rest of the driver state is being torn down
	 */
	del_timer_sync(&interface->service_timer);

	fm10k_stop_service_event(interface);
	fm10k_stop_macvlan_task(interface);

	/* Remove all pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	/* free netdev, this may bounce the interrupts due to setup_tc */
	if (netdev->reg_state == NETREG_REGISTERED)
		unregister_netdev(netdev);

	fm10k_iov_disable(pdev);

	/* disable mailbox interrupt */
	fm10k_mbx_free_irq(interface);

	/* free interrupts */
	fm10k_clear_queueing_scheme(interface);

	/* remove any debugfs interfaces */
	fm10k_dbg_intfc_exit(interface);

	/* sw_addr (BAR 4) is only mapped on some parts, uc_addr always is */
	if (interface->sw_addr)
		iounmap(interface->sw_addr);
	iounmap(interface->uc_addr);

	free_netdev(netdev);

	pci_release_mem_regions(pdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
2294 static void fm10k_prepare_suspend(struct fm10k_intfc
*interface
)
2296 /* the watchdog task reads from registers, which might appear like
2297 * a surprise remove if the PCIe device is disabled while we're
2298 * stopped. We stop the watchdog task until after we resume software
2301 * Note that the MAC/VLAN task will be stopped as part of preparing
2302 * for reset so we don't need to handle it here.
2304 fm10k_stop_service_event(interface
);
2306 if (fm10k_prepare_for_reset(interface
))
2307 set_bit(__FM10K_RESET_SUSPENDED
, interface
->state
);
/**
 * fm10k_handle_resume - Bring the device back up after suspend or reset
 * @interface: fm10k private interface structure
 *
 * Restores hardware state after fm10k_prepare_suspend, rebinds statistics,
 * performs a full reset, and restarts the service and MAC/VLAN tasks.
 *
 * Returns 0 on success, or a negative error code from fm10k_handle_reset.
 */
static int fm10k_handle_resume(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	int err;

	/* Even if we didn't properly prepare for reset in
	 * fm10k_prepare_suspend, we'll attempt to resume anyways.
	 */
	if (!test_and_clear_bit(__FM10K_RESET_SUSPENDED, interface->state))
		dev_warn(&interface->pdev->dev,
			 "Device was shut down as part of suspend... Attempting to recover\n");

	/* reset statistics starting values */
	hw->mac.ops.rebind_hw_stats(hw, &interface->stats);

	err = fm10k_handle_reset(interface);
	if (err)
		return err;

	/* assume host is not ready, to prevent race with watchdog in case we
	 * actually don't have connection to the switch
	 */
	interface->host_ready = false;
	fm10k_watchdog_host_not_ready(interface);

	/* force link to stay down for a second to prevent link flutter */
	interface->link_down_event = jiffies + (HZ);
	set_bit(__FM10K_LINK_DOWN, interface->state);

	/* restart the service task */
	fm10k_start_service_event(interface);

	/* Restart the MAC/VLAN request queue in-case of outstanding events */
	fm10k_macvlan_schedule(interface);

	return err;
}
2349 * fm10k_resume - Generic PM resume hook
2350 * @dev: generic device structure
2352 * Generic PM hook used when waking the device from a low power state after
2353 * suspend or hibernation. This function does not need to handle lower PCIe
2354 * device state as the stack takes care of that for us.
2356 static int __maybe_unused
fm10k_resume(struct device
*dev
)
2358 struct fm10k_intfc
*interface
= dev_get_drvdata(dev
);
2359 struct net_device
*netdev
= interface
->netdev
;
2360 struct fm10k_hw
*hw
= &interface
->hw
;
2363 /* refresh hw_addr in case it was dropped */
2364 hw
->hw_addr
= interface
->uc_addr
;
2366 err
= fm10k_handle_resume(interface
);
2370 netif_device_attach(netdev
);
2376 * fm10k_suspend - Generic PM suspend hook
2377 * @dev: generic device structure
2379 * Generic PM hook used when setting the device into a low power state for
2380 * system suspend or hibernation. This function does not need to handle lower
2381 * PCIe device state as the stack takes care of that for us.
2383 static int __maybe_unused
fm10k_suspend(struct device
*dev
)
2385 struct fm10k_intfc
*interface
= dev_get_drvdata(dev
);
2386 struct net_device
*netdev
= interface
->netdev
;
2388 netif_device_detach(netdev
);
2390 fm10k_prepare_suspend(interface
);
/**
 * fm10k_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 *
 * Returns PCI_ERS_RESULT_DISCONNECT on permanent failure, otherwise
 * requests a slot reset via PCI_ERS_RESULT_NEED_RESET.
 */
static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct net_device *netdev = interface->netdev;

	/* detach unconditionally — even a permanent failure needs this */
	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	fm10k_prepare_suspend(interface);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * fm10k_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
	pci_ers_result_t result;

	if (pci_reenable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);

		/* After second error pci->state_saved is false, this
		 * resets it so EEH doesn't break.
		 */
		pci_save_state(pdev);

		pci_wake_from_d3(pdev, false);

		result = PCI_ERS_RESULT_RECOVERED;
	}

	return result;
}
2452 * fm10k_io_resume - called when traffic can start flowing again.
2453 * @pdev: Pointer to PCI device
2455 * This callback is called when the error recovery driver tells us that
2456 * its OK to resume normal operation.
2458 static void fm10k_io_resume(struct pci_dev
*pdev
)
2460 struct fm10k_intfc
*interface
= pci_get_drvdata(pdev
);
2461 struct net_device
*netdev
= interface
->netdev
;
2464 err
= fm10k_handle_resume(interface
);
2467 dev_warn(&pdev
->dev
,
2468 "%s failed: %d\n", __func__
, err
);
2470 netif_device_attach(netdev
);
2474 * fm10k_io_reset_prepare - called when PCI function is about to be reset
2475 * @pdev: Pointer to PCI device
2477 * This callback is called when the PCI function is about to be reset,
2478 * allowing the device driver to prepare for it.
2480 static void fm10k_io_reset_prepare(struct pci_dev
*pdev
)
2482 /* warn incase we have any active VF devices */
2483 if (pci_num_vf(pdev
))
2484 dev_warn(&pdev
->dev
,
2485 "PCIe FLR may cause issues for any active VF devices\n");
2486 fm10k_prepare_suspend(pci_get_drvdata(pdev
));
2490 * fm10k_io_reset_done - called when PCI function has finished resetting
2491 * @pdev: Pointer to PCI device
2493 * This callback is called just after the PCI function is reset, such as via
2494 * /sys/class/net/<enpX>/device/reset or similar.
2496 static void fm10k_io_reset_done(struct pci_dev
*pdev
)
2498 struct fm10k_intfc
*interface
= pci_get_drvdata(pdev
);
2499 int err
= fm10k_handle_resume(interface
);
2502 dev_warn(&pdev
->dev
,
2503 "%s failed: %d\n", __func__
, err
);
2504 netif_device_detach(interface
->netdev
);
/* PCI AER / reset recovery callbacks wired into the PCI core */
static const struct pci_error_handlers fm10k_err_handler = {
	.error_detected = fm10k_io_error_detected,
	.slot_reset = fm10k_io_slot_reset,
	.resume = fm10k_io_resume,
	.reset_prepare = fm10k_io_reset_prepare,
	.reset_done = fm10k_io_reset_done,
};
/* System suspend/resume hooks (runtime PM is not used by this driver) */
static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
/* Top-level PCI driver descriptor registered with the PCI core */
static struct pci_driver fm10k_driver = {
	.name			= fm10k_driver_name,
	.id_table		= fm10k_pci_tbl,
	.probe			= fm10k_probe,
	.remove			= fm10k_remove,
	.driver = {
		.pm		= &fm10k_pm_ops,
	},
	.sriov_configure	= fm10k_iov_configure,
	.err_handler		= &fm10k_err_handler
};
/**
 * fm10k_register_pci_driver - register driver interface
 *
 * This function is called on module load in order to register the driver.
 *
 * Returns 0 on success, or a negative errno from pci_register_driver.
 **/
int fm10k_register_pci_driver(void)
{
	return pci_register_driver(&fm10k_driver);
}
/**
 * fm10k_unregister_pci_driver - unregister driver interface
 *
 * This function is called on module unload in order to remove the driver.
 **/
void fm10k_unregister_pci_driver(void)
{
	pci_unregister_driver(&fm10k_driver);
}