/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
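/* Illustrative expansion (assuming the usual macro values LIO_FW_DIR =
 * "liquidio/", LIO_FW_BASE_NAME = "lio_" and LIO_FW_NAME_SUFFIX = ".bin"):
 * the CN23xx entry above resolves to "liquidio/lio_23xx_nic.bin", which
 * request_firmware() looks up under /lib/firmware.
 */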
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");
/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
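/* Usage sketch (hypothetical values): loading the driver with
 * "modprobe liquidio console_bitmask=0x5" sets bits 0 and 2 of the mask,
 * so octeon_console_debug_enabled(0) and octeon_console_debug_enabled(2)
 * return 1 and those two consoles get mirrored to syslog.
 */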
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000
/* Update localtime to octeon firmware every 60 seconds so the firmware
 * uses the same time reference; that makes it easy to correlate
 * firmware-logged events/errors with host events when debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh_reserved;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_link_status_resp {
	u64 rh_reserved;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh_reserved;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
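/* Worked example (illustrative only; the real jabber value comes from
 * cn23xx_pf_device.h): if CN23XX_DEFAULT_INPUT_JABBER were 64600 bytes,
 * the largest GSO super-packet the driver would accept is
 * 64600 - 128 = 64472 bytes, leaving OCTNIC_GSO_MAX_HEADER_SIZE bytes of
 * headroom for protocol headers.
 */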
/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};
struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};
#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}
/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}
/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}
/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
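/* Masking sketch (hypothetical register values): with UNCOR_STATUS = 0x11
 * and UNCOR_SEVER = 0x10, the pci_channel_io_normal path writes back
 * 0x11 & ~0x10 = 0x01, clearing only the nonfatal bit; otherwise it writes
 * 0x11 & 0x10 = 0x10, clearing only the fatal bit. AER status bits are
 * write-1-to-clear, so the value written selects which bits get erased.
 */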
/**
 * \brief Stop all PCI IO to a given device
 * @param dev Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts  */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};
static const struct pci_device_id liquidio_pci_tbl[] = {
	{	/* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};
/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}
/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}
/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				lio->oct_dev->num_iqs].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}
/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}
/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}
/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				lio->oct_dev->num_iqs].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}
/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}
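/* Typical drain pattern (see delete_glists() below): callers repeatedly pop
 * with list_delete_head() and free each returned node until it yields NULL,
 * at which point the list is empty.
 */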
/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
				list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}
/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}
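/* Sizing sketch (illustrative numbers): with MAX_SKB_FRAGS == 17 and an
 * assumed OCT_SG_ENTRY_SIZE of 32 bytes, glist_entry_size becomes
 * ROUNDUP8((ROUNDUP4(17) >> 2) * 32) = ROUNDUP8(5 * 32) = 160 bytes per
 * gather component, and each IQ reserves tx_qsize such components in one
 * DMA-consistent block.
 */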
/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}
/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	rtnl_lock();
	call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
	rtnl_unlock();
}
/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}
/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}
/**
 * lio_sync_octeon_time_cb - callback that is invoked when soft command
 * sent by lio_sync_octeon_time() has completed successfully or failed
 *
 * @oct - octeon device structure
 * @status - indicates success or failure
 * @buf - pointer to the command that was sent to firmware
 **/
static void lio_sync_octeon_time_cb(struct octeon_device *oct,
				    u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	if (status)
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon; error=%d\n", status);

	octeon_free_soft_command(oct, sc);
}
/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	getnstimeofday64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	sc->callback = lio_sync_octeon_time_cb;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}
/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}
/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}
static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}
static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
			    (mask_of_crashed_or_stuck_cores
			     >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread. The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}
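/* Module-parameter sketch (values from the MODULE_PARM_DESC above): the
 * default fw_type of "auto" prefers firmware already flashed on the card
 * and falls back to the "nic" image; passing e.g. "fw_type=nic" on the
 * modprobe command line forces load_firmware() to fetch the NIC image
 * from /lib/firmware instead.
 */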
/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}
/**
 * \brief Destroy resources associated with octeon device
 * @param pdev PCI device structure
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fall through */
	case OCT_DEV_HOST_OK:

		/* fall through */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fall through */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fall through */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts  */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		/* fall through */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		/* fall through */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		/* fall through */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fall through */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fall through */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fall through */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fall through */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fall through */
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fall through */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fall through */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}
/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status,
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
			CVM_CAST64(status));
	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	struct liquidio_rx_ctl_context *ctx;
	union octnet_cmd *ncmd;
	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, ctx_size);

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
			return;
		oct->props[lio->ifidx].rx_on = start_stop;
	}

	octeon_free_soft_command(oct, sc);
}
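/* Control-path pattern: rx control is a "soft command" - the request is
 * queued on IQ 0, rx_ctl_callback() flips ctx->cond from completion
 * context, and this thread sleeps on ctx->wc until the flag is set or the
 * 5000 ms wait_time expires. send_rx_ctrl_cmd(lio, 1) starts rx and
 * send_rx_ctrl_cmd(lio, 0) stops it (see liquidio_open/liquidio_stop).
 */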
/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}
/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}
/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}
/**
 * \brief Identify the Octeon device and to map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
#ifdef CONFIG_PCI_IOV
		if (!ret)
			pci_sriov_set_totalvfs(oct->pci_dev,
					       oct->sriov_info.max_vfs);
#endif
		s = "CN23XX";
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}
/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
	int q = 0;

	if (netif_is_multiqueue(lio->netdev))
		q = skb->queue_mapping % lio->linfo.num_txpciq;

	return q;
}
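/* Mapping sketch (hypothetical numbers): with num_txpciq == 8, an skb
 * whose queue_mapping is 10 lands on gather list 10 % 8 == 2, so every
 * subqueue deterministically reuses one of the device's IQ-backed lists.
 */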
/**
 * \brief Check Tx queue state for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */
static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
{
	int q = 0, iq = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		q = skb->queue_mapping;
		iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
	} else {
		iq = lio->txq;
		q = iq;
	}

	if (octnet_iq_is_full(lio->oct_dev, iq))
		return 0;

	if (__netif_subqueue_stopped(lio->netdev, q)) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
		wake_q(lio->netdev, q);
	}
	return 1;
}
/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	check_txq_state(lio, skb);

	tx_buffer_free(skb);
}
/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	check_txq_state(lio, skb); /* mq support: sub-queue state check */

	tx_buffer_free(skb);
}
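/* Scatter layout note: each octeon_sg_entry carries four pointers, so
 * overall pointer i of the gather component lives at sg[i >> 2].ptr[i & 3];
 * entry 0 slot 0 holds the linear part of the skb and page fragment n maps
 * to overall index n + 1.
 */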
/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */

	check_txq_state(lio, skb);
}
/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
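/* Scaling sketch (illustrative rate): the comp register is a 32.32
 * fixed-point increment. For an assumed 600 MHz coprocessor clock, a
 * 1000 ppb adjustment yields delta = (1000 << 32) / 600000000 ~= 7158,
 * i.e. roughly 1.7e-6 of one tick added (or subtracted) per cycle.
 */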
/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * \brief Check if PTP is enabled
 * @param ptp PTP clock info
 * @param rq request
 * @param on is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
		    struct ptp_clock_request *rq __attribute__((unused)),
		    int on __attribute__((unused)))
{
	return -EOPNOTSUPP;
}
/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}
/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
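/* Compensation sketch (illustrative rate): for an assumed 600 MHz
 * coprocessor clock, clock_comp = (10^9 << 32) / (6 * 10^8), i.e. about
 * 1.667 in 32.32 fixed point (0x1_AAAA_AAAA), so the PTP counter advances
 * ~1.667 ns per coprocessor cycle.
 */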
/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type_is_auto()) {
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
	} else {
		tmp_fw_type = fw_type;
	}

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}
/**
 * \brief Callback for getting interface configuration
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void if_cfg_callback(struct octeon_device *oct,
			    u32 status __attribute__((unused)),
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_resp *resp;
	struct liquidio_if_cfg_context *ctx;

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
			CVM_CAST64(resp->status), status);
	WRITE_ONCE(ctx->cond, 1);

	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
		 resp->cfg_info.liquidio_firmware_version);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));

	return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}
/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	start_txq(netdev);

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}
/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	netif_tx_disable(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Tell Octeon that nic interface is down. */
	send_rx_ctrl_cmd(lio, 0);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}
/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates a octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}
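/* Flag-mapping sketch: a typical netdev with IFF_BROADCAST and
 * IFF_MULTICAST set (and fewer than MAX_OCTEON_MULTICAST_ADDR multicast
 * addresses) yields OCTNET_IFFLAG_UNICAST | OCTNET_IFFLAG_MULTICAST |
 * OCTNET_IFFLAG_BROADCAST; putting the interface in promiscuous mode
 * additionally ORs in OCTNET_IFFLAG_PROMISC.
 */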
/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}
/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}
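/* The host-side copies (netdev->dev_addr and lio->linfo.hw_addr) are only
 * updated after octnet_send_nic_ctrl_pkt() reports success, which keeps
 * the OS view of the MAC address consistent with the filter the firmware
 * is actually applying.
 */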
/**
 * \brief Net device get_stats
 * @param netdev network device
 */
static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct net_device_stats *stats = &netdev->stats;
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return stats;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	stats->rx_bytes = bytes;
	stats->rx_packets = pkts;
	stats->rx_dropped = drop;

	return stats;
}
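/* liquidio_get_stats() sums software counters kept per instruction queue
 * (Tx) and per DROQ (Rx). While LIO_IFSTATE_RESETTING is set the queues
 * may be getting reallocated, so the previously accumulated netdev->stats
 * snapshot is returned untouched instead of reading the queue structures.
 */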
/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nctrl.ncmd.s.param1 = new_mtu;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
		return -1;
	}

	lio->mtu = new_mtu;

	return 0;
}
/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
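/* Every specific PTP filter requested via SIOCSHWTSTAMP is widened to
 * HWTSTAMP_FILTER_ALL above, presumably because the NIC timestamps either
 * all received packets or none; user space learns the effective setting
 * from the config structure copied back to it.
 */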
/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct lio *lio = GET_LIO(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (lio->oct_dev->ptp_enable)
			return hwtstamp_ioctl(netdev, ifr);
		/* fall through */
	default:
		return -EOPNOTSUPP;
	}
}
/**
 * \brief handle a Tx timestamp response
 * @param status response status
 * @param buf pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}
/* \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo,
					 int xmit_more)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	if (OCTEON_CN23XX_PF(oct))
		len = (u32)((struct octeon_instr_ih3 *)
			    (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
			    (&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}
/** \brief Transmit network packets to the Octeon interface
 * @param skbuff skbuff struct to be passed to network layer.
 * @param netdev pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct lio *lio;
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_device *oct;
	struct oct_iq_stats *stats;
	struct octeon_instr_irh *irh;
	union tx_info *tx_info;
	int status = 0;
	int q_idx = 0, iq_no = 0;
	int j, xmit_more = 0;
	u64 dptr = 0;
	u32 tag = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	if (netif_is_multiqueue(netdev)) {
		q_idx = skb->queue_mapping;
		q_idx = (q_idx % (lio->linfo.num_txpciq));
		tag = q_idx;
		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
	} else {
		iq_no = lio->txq;
	}

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) ||
	    (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev,
			   "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;
	finfo->sc = NULL;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = (void *)finfo;

	ndata.q_no = iq_no;

	if (netif_is_multiqueue(netdev)) {
		if (octnet_iq_is_full(oct, ndata.q_no)) {
			/* defer sending if queue is full */
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   ndata.q_no);
			stats->tx_iq_busy++;
			return NETDEV_TX_BUSY;
		}
	} else {
		if (octnet_iq_is_full(oct, lio->txq)) {
			/* defer sending if queue is full */
			stats->tx_iq_busy++;
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   lio->txq);
			return NETDEV_TX_BUSY;
		}
	}
	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
	 */

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (skb_shinfo(skb)->nr_frags == 0) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		/* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			return NETDEV_TX_BUSY;
		}

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		int i, frags;
		struct skb_frag_struct *frag;
		struct octnic_gather *g;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);

			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	if (OCTEON_CN23XX_PF(oct)) {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
	} else {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
	}

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> 13;
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}

	xmit_more = skb->xmit_more;

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP)
		stop_q(netdev, q_idx);

	netif_trans_update(netdev);

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);

	octeon_ring_doorbell_locked(oct, iq_no);

	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}
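/* Two details of the xmit path above: each gather-list entry packs four
 * DMA pointers, which is why fragment i lands in g->sg[i >> 2].ptr[i & 3];
 * and the doorbell is rung only when skb->xmit_more is clear, allowing
 * several descriptors to be queued per doorbell write.
 */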
/** \brief Network device Tx timeout
 * @param netdev pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	txqs_wake(netdev);
}
static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
				    __be16 proto __attribute__((unused)),
				    u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}
static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
				     __be16 proto __attribute__((unused)),
				     u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Kill VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}
/** Sending command to enable/disable RX checksum offload
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
 *               OCTNET_CMD_RXCSUM_DISABLE
 * @returns SUCCESS or FAILURE
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			ret);
	}
	return ret;
}
/** Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev pointer to network device
 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 *                      OCTNET_CMD_VXLAN_PORT_DEL
 * @returns SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"VxLAN port add/delete failed in core (ret:0x%x)\n",
			ret);
	}
	return ret;
}
/** \brief Net device fix features
 * @param netdev pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

	return request;
}
/** \brief Net device set features
 * @param netdev pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if ((features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO) &&
	    !(netdev->features & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO) &&
		 (netdev->features & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

	/* Sending command to firmware to enable/disable RX checksum
	 * offload settings using ethtool
	 */
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev,
					    OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_ENABLE);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_DISABLE);

	return 0;
}
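/* liquidio_set_features() only issues a firmware command when a feature
 * bit actually transitions (the requested state differs from
 * netdev->features and the device advertises the capability), avoiding
 * redundant control-path round trips on every ethtool invocation.
 */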
static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}
static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}
static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
				 u8 *mac, bool is_admin_assigned)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param1 = vfidx + 1;
	nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);

	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	return 0;
}
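/* The PF caches each VF MAC in oct->sriov_info.vf_macaddr[] in the same
 * packed-u64 form used in nctrl.udd[0], so liquidio_get_vf_config() can
 * report it later without querying the firmware.
 */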
static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int retval;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
	if (!retval)
		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);

	return retval;
}
static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
				u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	u16 vlantci;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan >= VLAN_N_VID || qos > 7)
		return -EINVAL;

	if (vlan)
		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
	else
		vlantci = 0;

	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	if (vlan)
		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	else
		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;

	nctrl.ncmd.s.param1 = vlantci;
	nctrl.ncmd.s.param2 =
	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = NULL;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	oct->sriov_info.vf_vlantci[vfidx] = vlantci;

	return 0;
}
static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
				  struct ifla_vf_info *ivi)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *macaddr;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	ivi->vf = vfidx;
	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
	ether_addr_copy(&ivi->mac[0], macaddr);
	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
	return 0;
}
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
	nctrl.ncmd.s.param1 =
	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param2 = linkstate;
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = NULL;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	oct->sriov_info.vf_linkstate[vfidx] = linkstate;

	return 0;
}
static int
liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct lio_devlink_priv *priv;
	struct octeon_device *oct;

	priv = devlink_priv(devlink);
	oct = priv->oct;

	*mode = oct->eswitch_mode;

	return 0;
}
static int
liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct lio_devlink_priv *priv;
	struct octeon_device *oct;
	int ret = 0;

	priv = devlink_priv(devlink);
	oct = priv->oct;

	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
		return -EINVAL;

	if (oct->eswitch_mode == mode)
		return 0;

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		oct->eswitch_mode = mode;
		ret = lio_vf_rep_create(oct);
		break;

	case DEVLINK_ESWITCH_MODE_LEGACY:
		lio_vf_rep_destroy(oct);
		oct->eswitch_mode = mode;
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}
static const struct devlink_ops liquidio_devlink_ops = {
	.eswitch_mode_get = liquidio_eswitch_mode_get,
	.eswitch_mode_set = liquidio_eswitch_mode_set,
};
static int
lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id,
				(void *)&lio->linfo.hw_addr + 2);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static const struct switchdev_ops lio_pf_switchdev_ops = {
	.switchdev_port_attr_get = lio_pf_switchdev_attr_get,
};
static const struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats		= liquidio_get_stats,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,

	.ndo_vlan_rx_add_vid	= liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
	.ndo_set_vf_mac		= liquidio_set_vf_mac,
	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
	.ndo_get_vf_config	= liquidio_get_vf_config,
	.ndo_set_vf_link_state	= liquidio_set_vf_link_state,
};
/** \brief Entry point for the liquidio module
 */
static int __init liquidio_init(void)
{
	int i;
	struct handshake *hs;

	init_completion(&first_stage);

	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);

	if (liquidio_init_pci())
		return -EINVAL;

	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion(&hs->init);
			if (!hs->init_ok) {
				/* init handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Failed to init device\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion_timeout(&hs->started,
						    msecs_to_jiffies(30000));
			if (!hs->started_ok) {
				/* starter handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Firmware failed to start\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	return 0;
}
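/* Module load performs a two-stage handshake per device: 'init' completes
 * when octeon_device_init() finishes (handshake[].init_ok), and 'started'
 * completes once nic_starter() sees the firmware reach CORE_OK and brings
 * up the NIC module, with a 30-second bound on the second wait.
 */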
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int gmxport = 0;
	union oct_link_status *ls;
	int i;

	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
		OCT_DROQ_INFO_SIZE);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}
/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j, *fw_ver;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, ctx_size, data_size;
	u32 ifidx_or_pfnum;
	struct lio_version *vdata;
	struct devlink *devlink;
	struct lio_devlink_priv *lio_devlink;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if (OCTEON_CN23XX_PF(octeon_dev)) {
			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
			base_queue = octeon_dev->sriov_info.pf_srn;

			gmx_port_id = octeon_dev->pf_num;
			ifidx_or_pfnum = octeon_dev->pf_num;
		} else {
			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
					octeon_get_conf(octeon_dev), i);
			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
					octeon_get_conf(octeon_dev), i);
			base_queue = CFG_GET_BASE_QUE_NIC_IF(
					octeon_get_conf(octeon_dev), i);
			gmx_port_id = CFG_GET_GMXID_NIC_IF(
					octeon_get_conf(octeon_dev), i);
			ifidx_or_pfnum = i;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);
		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
			goto setup_nic_wait_intr;
		}

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		/* Verify f/w version (in case of 'auto' loading from flash) */
		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
		if (memcmp(LIQUIDIO_BASE_VERSION,
			   fw_ver,
			   strlen(LIQUIDIO_BASE_VERSION))) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Unmatched firmware version. Expected %s.x, got %s.\n",
				LIQUIDIO_BASE_VERSION, fw_ver);
			goto setup_nic_dev_fail;
		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
			   FW_IS_PRELOADED) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "Using auto-loaded firmware version %s.\n",
				 fw_ver);
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);
		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;
		SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		if (OCTEON_CN23XX_PF(octeon_dev) ||
		    OCTEON_CN6XXX(octeon_dev)) {
			lio->dev_capability = NETIF_F_HIGHDMA
					      | NETIF_F_IP_CSUM
					      | NETIF_F_IPV6_CSUM
					      | NETIF_F_SG | NETIF_F_RXCSUM
					      | NETIF_F_GRO
					      | NETIF_F_TSO | NETIF_F_TSO6
					      | NETIF_F_LRO;
		}
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features = (lio->enc_dev_capability &
					   ~NETIF_F_LRO);

		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER is always on */
		netdev->hw_features = netdev->hw_features &
			~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
			u8 vfmac[ETH_ALEN];

			random_ether_addr(&vfmac[0]);
			if (__liquidio_set_vf_mac(netdev, j,
						  &vfmac[0], false)) {
				dev_err(&octeon_dev->pci_dev->dev,
					"Error setting VF%d MAC address\n",
					j);
				goto setup_nic_dev_fail;
			}
		}

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */

		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon uses the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (liquidio_setup_io_queues(octeon_dev, i,
					     lio->linfo.num_txpciq,
					     lio->linfo.num_rxpciq)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_ENABLE);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_fail;

		if ((octeon_dev->fw_info.app_cap_flags &
		     LIQUIDIO_TIME_SYNC_CAP) &&
		    setup_sync_octeon_time_wq(netdev))
			goto setup_nic_dev_fail;

		if (setup_rx_oom_poll_fn(netdev))
			goto setup_nic_dev_fail;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_free_soft_command(octeon_dev, sc);
	}

	devlink = devlink_alloc(&liquidio_devlink_ops,
				sizeof(struct lio_devlink_priv));
	if (!devlink) {
		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
		goto setup_nic_wait_intr;
	}

	lio_devlink = devlink_priv(devlink);
	lio_devlink->oct = octeon_dev;

	if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
		devlink_free(devlink);
		dev_err(&octeon_dev->pci_dev->dev,
			"devlink registration failed\n");
		goto setup_nic_wait_intr;
	}

	octeon_dev->devlink = devlink;
	octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

setup_nic_wait_intr:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}
	return -ENODEV;
}
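/* On failure, setup_nic_devices() frees the in-flight soft command first
 * (skipped if the wait was interrupted, presumably because a late response
 * could still reference it) and then walks back over the interfaces
 * already brought up, destroying each via liquidio_destroy_nic_device().
 */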
#ifdef CONFIG_PCI_IOV
static int octeon_enable_sriov(struct octeon_device *oct)
{
	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
	struct pci_dev *vfdev;
	int err;
	u32 u;

	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
		err = pci_enable_sriov(oct->pci_dev,
				       oct->sriov_info.num_vfs_alloced);
		if (err) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Failed to enable PCI sriov: %d\n",
				err);
			oct->sriov_info.num_vfs_alloced = 0;
			return err;
		}
		oct->sriov_info.sriov_enabled = 1;

		/* init lookup table that maps DPI ring number to VF pci_dev
		 * struct pointer
		 */
		u = 0;
		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				       OCTEON_CN23XX_VF_VID, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn &&
			    (vfdev->physfn == oct->pci_dev)) {
				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
					vfdev;
				u += oct->sriov_info.rings_per_vf;
			}
			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
					       OCTEON_CN23XX_VF_VID, vfdev);
		}
	}

	return num_vfs_alloced;
}
static int lio_pci_sriov_disable(struct octeon_device *oct)
{
	int u;

	if (pci_vfs_assigned(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
		return -EPERM;
	}

	pci_disable_sriov(oct->pci_dev);

	u = 0;
	while (u < MAX_POSSIBLE_VFS) {
		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
		u += oct->sriov_info.rings_per_vf;
	}

	oct->sriov_info.num_vfs_alloced = 0;
	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
		 oct->pf_num);

	return 0;
}
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
{
	struct octeon_device *oct = pci_get_drvdata(dev);
	int ret = 0;

	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
	    (oct->sriov_info.sriov_enabled)) {
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
			 oct->pf_num, num_vfs);
		return 0;
	}

	if (!num_vfs) {
		lio_vf_rep_destroy(oct);
		ret = lio_pci_sriov_disable(oct);
	} else if (num_vfs > oct->sriov_info.max_vfs) {
		dev_err(&oct->pci_dev->dev,
			"OCTEON: Max allowed VFs:%d user requested:%d",
			oct->sriov_info.max_vfs, num_vfs);
		ret = -EPERM;
	} else {
		oct->sriov_info.num_vfs_alloced = num_vfs;
		ret = octeon_enable_sriov(oct);
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
			 oct->pf_num, num_vfs);

		ret = lio_vf_rep_create(oct);
		if (ret)
			dev_info(&oct->pci_dev->dev,
				 "vf representor create failed");
	}

	return ret;
}
#endif
/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* only default iq and oq were initialized
	 * initialize the rest as well
	 */
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	/* Call vf_rep_modinit if the firmware is switchdev capable
	 * and do it from the first liquidio function probed.
	 */
	if (!oct->octeon_id &&
	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
		retval = lio_vf_rep_modinit();
		if (retval) {
			liquidio_stop_nic_module(oct);
			goto octnet_init_failure;
		}
	}

	liquidio_ptp_init(oct);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}
/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param work work_struct embedded in the driver's poll-work context
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}
static int
octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int i, notice, vf_idx;
	bool cores_crashed;
	u64 *data, vf_num;

	notice = recv_pkt->rh.r.ossp;
	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);

	/* the first 64-bit word of data is the vf_num */
	vf_num = data[0];
	octeon_swap_8B_data(&vf_num, 1);
	vf_idx = (int)vf_num - 1;

	cores_crashed = READ_ONCE(oct->cores_crashed);

	if (notice == VF_DRV_LOADED) {
		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was loaded\n", vf_idx);
			if (!cores_crashed)
				try_module_get(THIS_MODULE);
		}
	} else if (notice == VF_DRV_REMOVED) {
		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was removed\n", vf_idx);
			if (!cores_crashed)
				module_put(THIS_MODULE);
		}
	} else if (notice == VF_DRV_MACADDR_CHANGED) {
		u8 *b = (u8 *)&data[1];

		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
		dev_info(&oct->pci_dev->dev,
			 "VF driver changed VF%d's MAC address to %pM\n",
			 vf_idx, b + 2);
	}

	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}
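/* VF driver load/unload notices adjust the PF module's reference count
 * (try_module_get()/module_put()) so the PF driver cannot be unloaded
 * while VF drivers still depend on it; the cores_crashed check skips this
 * bookkeeping once the firmware cores have already died.
 */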
/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	char bootcmd[] = "\n";
	char *dbg_enb = NULL;
	enum lio_fw_state fw_state;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;
	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
	 * since that is what is required for the reference to be removed
	 * during de-initialization (see 'octeon_destroy_resources').
	 */
	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
			       PCI_SLOT(octeon_dev->pci_dev->devfn),
			       PCI_FUNC(octeon_dev->pci_dev->devfn),
			       true);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	/* CN23XX supports preloaded firmware if the following is true:
	 *
	 * The adapter indicates that firmware is currently running AND
	 * 'fw_type' is 'auto'.
	 *
	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
	 */
	if (OCTEON_CN23XX_PF(octeon_dev) &&
	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
		atomic_cmpxchg(octeon_dev->adapter_fw_state,
			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
	}

	/* If loading firmware, only first device of adapter needs to do so. */
	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
				  FW_NEEDS_TO_BE_LOADED,
				  FW_IS_BEING_LOADED);

	/* Here, [local variable] 'fw_state' is set to one of:
	 *
	 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
	 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
	 *                        firmware to the adapter.
	 * FW_IS_BEING_LOADED: The driver's second instance will not load
	 *                     firmware to the adapter.
	 */

	/* Prior to f/w load, perform a soft reset of the Octeon device;
	 * if error resetting, return w/error.
	 */
	if (fw_state == FW_NEEDS_TO_BE_LOADED)
		if (octeon_dev->fn_list.soft_reset(octeon_dev))
			return 1;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_VF_DRV_NOTICE,
				    octeon_recv_vf_drv_notice, octeon_dev);
	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);

		if (octeon_allocate_ioq_vector(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	} else {
		/* The input and output queue registers were setup earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing.*/
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev,
				   octeon_dev->sriov_info.num_pf_rings))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
	 * the output queue is enabled.
	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
	 * before any credits have been issued, causing the ring to be reset
	 * (and the f/w appear to never have started).
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset.*/
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		/* If console debug enabled, specify empty string to use default
		 * enablement ELSE specify NULL string for 'disabled'.
		 */
		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		} else if (octeon_console_debug_enabled(0)) {
			/* If console was added AND we're logging console output
			 * then set our console print function.
			 */
			octeon_dev->console[0].print = octeon_dbg_console_print;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}

		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	return 0;
}
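/* octeon_device_init() advances oct->status through a ladder of
 * OCT_DEV_*_DONE states as each resource comes up; the teardown path
 * (octeon_destroy_resources(), defined elsewhere in this file) is expected
 * to switch on this value so it releases exactly what was initialized.
 */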
/**
 * \brief Debug console print function
 * @param octeon_dev octeon device
 * @param console_num console number
 * @param prefix first portion of line to display
 * @param suffix second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into two
 * parts, in which case they will be passed as the 'prefix' parameter and
 * 'suffix' parameter.
 */
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix)
{
	if (prefix && suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
			 suffix);
	else if (prefix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
	else if (suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);

	return 0;
}
/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);