/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type,
		 "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");
/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000

/* update localtime to octeon firmware every 60 seconds.
 * This makes the firmware use the same time reference as the host, so
 * firmware-logged events/errors can easily be correlated with host events
 * when debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)
struct lio_trusted_vf_ctx {
	struct completion complete;
	int status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
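/* OCTNIC_GSO_MAX_SIZE is thus the largest GSO payload the driver will
 * advertise: the chip's default input jabber (the maximum frame size it
 * accepts) minus 128 bytes reserved for protocol headers.
 */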
struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};
#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}
/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}
/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}
/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
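/* PCI_ERR_UNCOR_SEVER marks which uncorrectable errors are treated as
 * fatal, and the status register is write-1-to-clear, so the
 * read-modify-write above clears only the bits matching the channel
 * state: nonfatal bits while IO is still normal, fatal bits otherwise.
 */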
/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but
	 * only for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}
#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};
static const struct pci_device_id liquidio_pci_tbl[] = {
	{	/* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};
/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}
/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}
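/* A subqueue is woken only if it was stopped and its backing instruction
 * queue has drained below the full mark; the return value counts how many
 * subqueues were restarted.
 */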
/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}
/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
	 * this API is invoked only when new max-MTU of the interface is
	 * less than current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}
/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}
/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}
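/* The actual MTU reduction is deferred to the link-status workqueue
 * because dev_set_mtu() needs the rtnl lock, which cannot be taken from
 * this response-processing context.
 */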
/**
 * lio_sync_octeon_time_cb - callback that is invoked when soft command
 * sent by lio_sync_octeon_time() has completed successfully or failed
 *
 * @oct - octeon device structure
 * @status - indicates success or failure
 * @buf - pointer to the command that was sent to firmware
 **/
static void lio_sync_octeon_time_cb(struct octeon_device *oct,
				    u32 status, void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

	if (status)
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon; error=%d\n", status);

	octeon_free_soft_command(oct, sc);
}
/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	sc->callback = lio_sync_octeon_time_cb;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}
/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}
/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}
static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}
static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
			    (mask_of_crashed_or_stuck_cores
			     >> core) & 1ULL;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}
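/* Each loaded VF driver holds a reference on this PF module; once the
 * firmware has crashed those references would never be dropped normally,
 * so the watchdog releases them here, keeping module unload from blocking
 * forever.
 */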
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread. The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}
/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}
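/* An FLR resets only this PCI function without disturbing firmware that
 * was auto-booted from flash; octeon_destroy_resources() below prefers an
 * FLR over a soft reset for exactly that FW_IS_PRELOADED case.
 */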
/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	} /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}
/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status,
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
			CVM_CAST64(status));
	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	struct liquidio_rx_ctl_context *ctx;
	union octnet_cmd *ncmd;
	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, ctx_size);

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
			return;
		oct->props[lio->ifidx].rx_on = start_stop;
	}

	octeon_free_soft_command(oct, sc);
}
/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}
/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}
/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}
/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		s = "CN23XX";
#ifdef CONFIG_PCI_IOV
		if (!ret)
			pci_sriov_set_totalvfs(oct->pci_dev,
					       oct->sriov_info.max_vfs);
#endif
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}
/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}
/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}
/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}
/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}
/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
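/* Worked example (illustrative numbers): with a 1 GHz coprocessor clock
 * and ppb = 1000, delta = (1000 << 32) / 1e9 ~= 4295. Adding that to the
 * compensation register accumulates an extra 4295/2^32 ns per tick, i.e.
 * ~1000 ns per second -- exactly the requested 1000 parts-per-billion.
 */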
/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * \brief Check if PTP is enabled
 * @param ptp PTP clock info
 * @param rq request
 * @param on is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
		    struct ptp_clock_request *rq __attribute__((unused)),
		    int on __attribute__((unused)))
{
	return -EOPNOTSUPP;
}
/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}
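/* ptp_clock_register() exposes the adapter as a PTP hardware clock (a
 * /dev/ptpN device) that tools such as ptp4l/phc2sys can read or
 * discipline; on failure lio->ptp_clock is simply left NULL and the
 * interface runs without a PHC.
 */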
/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type_is_auto()) {
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
	} else {
		tmp_fw_type = fw_type;
	}

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}
/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));

	return 0;
}
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}
/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	netif_tx_start_all_queues(netdev);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}
1887 * @param netdev network device
1889 static int liquidio_stop(struct net_device
*netdev
)
1891 struct lio
*lio
= GET_LIO(netdev
);
1892 struct octeon_device
*oct
= lio
->oct_dev
;
1893 struct napi_struct
*napi
, *n
;
1895 ifstate_reset(lio
, LIO_IFSTATE_RUNNING
);
1897 /* Stop any link updates */
1902 /* Inform that netif carrier is down */
1903 netif_carrier_off(netdev
);
1904 netif_tx_disable(netdev
);
1906 lio
->linfo
.link
.s
.link_up
= 0;
1907 lio
->link_changes
++;
1909 /* Tell Octeon that nic interface is down. */
1910 send_rx_ctrl_cmd(lio
, 0);
1912 if (OCTEON_CN23XX_PF(oct
)) {
1914 cleanup_tx_poll_fn(netdev
);
1916 cleanup_tx_poll_fn(netdev
);
1919 if (lio
->ptp_clock
) {
1920 ptp_clock_unregister(lio
->ptp_clock
);
1921 lio
->ptp_clock
= NULL
;
1924 /* Wait for any pending Rx descriptors */
1925 if (lio_wait_for_clean_oq(oct
))
1926 netif_info(lio
, rx_err
, lio
->netdev
,
1927 "Proceeding with stop interface after partial RX desc processing\n");
1929 if (oct
->props
[lio
->ifidx
].napi_enabled
== 1) {
1930 list_for_each_entry_safe(napi
, n
, &netdev
->napi_list
, dev_list
)
1933 oct
->props
[lio
->ifidx
].napi_enabled
= 0;
1935 if (OCTEON_CN23XX_PF(oct
))
1936 oct
->droq
[0]->ops
.poll_mode
= 0;
1939 dev_info(&oct
->pci_dev
->dev
, "%s interface is stopped\n", netdev
->name
);
/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}
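/* Example: an interface with IFF_PROMISC | IFF_BROADCAST set maps to
 * OCTNET_IFFLAG_UNICAST | OCTNET_IFFLAG_PROMISC | OCTNET_IFFLAG_BROADCAST;
 * unicast is always implied.
 */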
/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}
/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}
static void
liquidio_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	lstats->rx_bytes = bytes;
	lstats->rx_packets = pkts;
	lstats->rx_dropped = drop;

	octnet_get_link_stats(netdev);
	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
	lstats->collisions = oct->link_stats.fromhost.total_collisions;

	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* received pkt with crc error */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* received frame alignment error */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
	/* receiver fifo overrun */
	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;

	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
			    lstats->rx_frame_errors + lstats->rx_fifo_errors;

	/* detailed tx_errors */
	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;

	lstats->tx_errors = lstats->tx_aborted_errors +
			    lstats->tx_carrier_errors +
			    lstats->tx_fifo_errors;
}
/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct lio *lio = GET_LIO(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (lio->oct_dev->ptp_enable)
			return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * \brief handle a Tx timestamp response
 * @param status response status
 * @param buf pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}
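
/* The timestamp is returned by the firmware as a raw 64-bit nanosecond
 * count in the soft command's response buffer; octeon_swap_8B_data() puts
 * it in host byte order, and adding lio->ptp_adjust re-bases it before
 * skb_tstamp_tx() delivers it to the stack.
 */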

/** \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo,
					 int xmit_more)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	if (OCTEON_CN23XX_PF(oct))
		len = (u32)((struct octeon_instr_ih3 *)
			    (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
			    (&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}

/** \brief Transmit network packets to the Octeon interface
 * @param skb     sk_buff to be transmitted
 * @param netdev  pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *          (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct lio *lio;
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_device *oct;
	struct oct_iq_stats *stats;
	struct octeon_instr_irh *irh;
	union tx_info *tx_info;
	int status = 0;
	int q_idx = 0, iq_no = 0;
	int j, xmit_more = 0;
	u64 dptr = 0;
	u32 tag = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	q_idx = skb_iq(oct, skb);
	tag = q_idx;
	iq_no = lio->linfo.txpciq[q_idx].s.q_no;

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) ||
	    (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev,
			   "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;
	finfo->sc = NULL;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = (void *)finfo;

	ndata.q_no = iq_no;

	if (octnet_iq_is_full(oct, ndata.q_no)) {
		/* defer sending if queue is full */
		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
			   ndata.q_no);
		stats->tx_iq_busy++;
		return NETDEV_TX_BUSY;
	}

	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
	 */

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (skb_shinfo(skb)->nr_frags == 0) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		/* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			stats->tx_dmamap_fail++;
			return NETDEV_TX_BUSY;
		}

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		int i, frags;
		struct skb_frag_struct *frag;
		struct octnic_gather *g;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			lio_list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			stats->tx_dmamap_fail++;
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);

			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	if (OCTEON_CN23XX_PF(oct)) {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
	} else {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
	}

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> 13;
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}

	xmit_more = skb->xmit_more;

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP)
		netif_stop_subqueue(netdev, q_idx);

	netif_trans_update(netdev);

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);

	octeon_ring_doorbell_locked(oct, iq_no);

	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}
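
/* Gather-list layout used above: each octeon_sg_entry holds four DMA
 * pointers, so buffer i lands in g->sg[i >> 2].ptr[i & 3] (for example,
 * i == 5 maps to g->sg[1].ptr[1]), with add_sg_size() recording the
 * matching length; entry 0 always carries the linear part of the skb.
 * The VLAN fields split the tag the same way the 802.1Q TCI is laid out:
 * bits 15:13 (tag >> 13) are the priority, bits 11:0 (tag & 0xfff) are
 * the VLAN ID.
 */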

/** \brief Network device Tx timeout
 * @param netdev    pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	wake_txqs(netdev);
}

static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
				    __be16 proto __attribute__((unused)),
				    u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}

static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
				     __be16 proto __attribute__((unused)),
				     u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}

/** Sending command to enable/disable RX checksum offload
 * @param netdev     pointer to network device
 * @param command    OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd     OCTNET_CMD_RXCSUM_ENABLE/
 *                   OCTNET_CMD_RXCSUM_DISABLE
 * @returns          SUCCESS or FAILURE
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			ret);
	}

	return ret;
}

/** Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev        pointer to network device
 * @param command       OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port    VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 *                      OCTNET_CMD_VXLAN_PORT_DEL
 * @returns             SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"VxLAN port add/delete failed in core (ret:0x%x)\n",
			ret);
	}

	return ret;
}

/** \brief Net device fix features
 * @param netdev  pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

	return request;
}

/** \brief Net device set features
 * @param netdev   pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if ((features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO) &&
	    !(netdev->features & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO) &&
		 (netdev->features & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

	/* Sending command to firmware to enable/disable RX checksum
	 * offload settings using ethtool
	 */
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev,
					    OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_ENABLE);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_DISABLE);

	return 0;
}

static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}

static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
				 u8 *mac, bool is_admin_assigned)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param1 = vfidx + 1;
	nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);

	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	return 0;
}

static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int retval;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
	if (!retval)
		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);

	return retval;
}

static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
				u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	u16 vlantci;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan >= VLAN_N_VID || qos > 7)
		return -EINVAL;

	if (vlan)
		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
	else
		vlantci = 0;

	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	if (vlan)
		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	else
		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;

	nctrl.ncmd.s.param1 = vlantci;
	nctrl.ncmd.s.param2 =
	    vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = NULL;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	oct->sriov_info.vf_vlantci[vfidx] = vlantci;

	return 0;
}
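
/* The TCI computed above packs the 3-bit QoS priority over the 12-bit
 * VLAN ID: with vlan = 100 and qos = 3 it becomes
 * 100 | (3 << VLAN_PRIO_SHIFT) = 0x6064. Passing vlan = 0 clears the
 * setting, which is why that case is sent as OCTNET_CMD_DEL_VLAN_FILTER.
 */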

static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
				  struct ifla_vf_info *ivi)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *macaddr;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	memset(ivi, 0, sizeof(struct ifla_vf_info));

	ivi->vf = vfidx;
	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
	ether_addr_copy(&ivi->mac[0], macaddr);
	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
	if (oct->sriov_info.trusted_vf.active &&
	    oct->sriov_info.trusted_vf.id == vfidx)
		ivi->trusted = true;
	else
		ivi->trusted = false;
	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];

	return 0;
}

static void trusted_vf_callback(struct octeon_device *oct_dev,
				u32 status, void *ptr)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
	struct lio_trusted_vf_ctx *ctx;

	ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
	ctx->status = status;

	complete(&ctx->complete);
}

static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
{
	struct octeon_device *oct = lio->oct_dev;
	struct lio_trusted_vf_ctx *ctx;
	struct octeon_soft_command *sc;
	int ctx_size, retval;

	ctx_size = sizeof(struct lio_trusted_vf_ctx);
	sc = octeon_alloc_soft_command(oct, 0, 0, ctx_size);
	if (!sc)
		return -ENOMEM;

	ctx = (struct lio_trusted_vf_ctx *)sc->ctxptr;
	init_completion(&ctx->complete);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
				    trusted);

	sc->callback = trusted_vf_callback;
	sc->callback_arg = sc;
	sc->wait_time = 1000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		retval = -1;
	} else {
		/* Wait for response or timeout */
		if (wait_for_completion_timeout(&ctx->complete,
						msecs_to_jiffies(2000)))
			retval = ctx->status;
		else
			retval = -1;
	}

	octeon_free_soft_command(oct, sc);

	return retval;
}
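
/* Unlike most control commands in this file, the trust change is handled
 * synchronously: trusted_vf_callback() completes the context embedded in
 * the soft command, and the 2000 ms wait_for_completion_timeout() above
 * bounds how long the caller can block on a firmware that never answers.
 */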

static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
				 bool setting)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
		/* trusted vf is not supported by firmware older than 1.7.1 */
		return -EOPNOTSUPP;
	}

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
		return -EINVAL;
	}

	if (setting) {
		/* Already trusted? */
		if (oct->sriov_info.trusted_vf.active &&
		    oct->sriov_info.trusted_vf.id == vfidx)
			return 0;

		if (oct->sriov_info.trusted_vf.active) {
			netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
			return -EPERM;
		}
	} else {
		/* Already untrusted? */
		if (!oct->sriov_info.trusted_vf.active)
			return 0;
	}

	if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
		if (setting) {
			oct->sriov_info.trusted_vf.id = vfidx;
			oct->sriov_info.trusted_vf.active = true;
		} else {
			oct->sriov_info.trusted_vf.active = false;
		}

		netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
			   setting ? "" : "not ");
	} else {
		netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
		return -1;
	}

	return 0;
}

static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
	nctrl.ncmd.s.param1 =
	    vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param2 = linkstate;
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = NULL;
	nctrl.wait_time = LIO_CMD_WAIT_TM;

	octnet_send_nic_ctrl_pkt(oct, &nctrl);

	oct->sriov_info.vf_linkstate[vfidx] = linkstate;

	return 0;
}

static int
liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct lio_devlink_priv *priv;
	struct octeon_device *oct;

	priv = devlink_priv(devlink);
	oct = priv->oct;

	*mode = oct->eswitch_mode;

	return 0;
}

static int
liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct lio_devlink_priv *priv;
	struct octeon_device *oct;
	int ret = 0;

	priv = devlink_priv(devlink);
	oct = priv->oct;

	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
		return -EINVAL;

	if (oct->eswitch_mode == mode)
		return 0;

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		oct->eswitch_mode = mode;
		ret = lio_vf_rep_create(oct);
		break;

	case DEVLINK_ESWITCH_MODE_LEGACY:
		lio_vf_rep_destroy(oct);
		oct->eswitch_mode = mode;
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}

static const struct devlink_ops liquidio_devlink_ops = {
	.eswitch_mode_get = liquidio_eswitch_mode_get,
	.eswitch_mode_set = liquidio_eswitch_mode_set,
};

static int
lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id,
				(void *)&lio->linfo.hw_addr + 2);
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
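
/* The port parent ID reported above is the interface MAC address
 * (hw_addr is stored with a two-byte pad, hence the '+ 2'), so all ports
 * backed by the same adapter should advertise the same switch ID while
 * the eswitch is in switchdev mode.
 */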

static const struct switchdev_ops lio_pf_switchdev_ops = {
	.switchdev_port_attr_get = lio_pf_switchdev_attr_get,
};

static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
				 struct ifla_vf_stats *vf_stats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_vf_stats stats;
	int ret;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	memset(&stats, 0, sizeof(struct oct_vf_stats));
	ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
	if (!ret) {
		vf_stats->rx_packets = stats.rx_packets;
		vf_stats->tx_packets = stats.tx_packets;
		vf_stats->rx_bytes = stats.rx_bytes;
		vf_stats->tx_bytes = stats.tx_bytes;
		vf_stats->broadcast = stats.broadcast;
		vf_stats->multicast = stats.multicast;
	}

	return ret;
}

static const struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats64	= liquidio_get_stats64,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,

	.ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
	.ndo_set_vf_mac		= liquidio_set_vf_mac,
	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
	.ndo_get_vf_config	= liquidio_get_vf_config,
	.ndo_set_vf_trust	= liquidio_set_vf_trust,
	.ndo_set_vf_link_state  = liquidio_set_vf_link_state,
	.ndo_get_vf_stats	= liquidio_get_vf_stats,
};

/** \brief Entry point for the liquidio module
 */
static int __init liquidio_init(void)
{
	int i;
	struct handshake *hs;

	init_completion(&first_stage);

	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);

	if (liquidio_init_pci())
		return -EINVAL;

	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion(&hs->init);
			if (!hs->init_ok) {
				/* init handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Failed to init device\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion_timeout(&hs->started,
						    msecs_to_jiffies(30000));
			if (!hs->started_ok) {
				/* starter handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Firmware failed to start\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	return 0;
}

static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int gmxport = 0;
	union oct_link_status *ls;
	int i;

	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
		OCT_DROQ_INFO_SIZE);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

/**
 * \brief Setup network interfaces
 * @param octeon_dev  octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j, *fw_ver, *micro_ver;
	unsigned long micro;
	u32 cur_ver;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	int max_num_queues = 0;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, ctx_size, data_size;
	u32 ifidx_or_pfnum;
	struct lio_version *vdata;
	struct devlink *devlink;
	struct lio_devlink_priv *lio_devlink;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, ctx_size);
		if (!sc)
			goto setup_nic_wait_intr;

		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		*((u64 *)vdata) = 0;
		vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
		vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
		vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);

		if (OCTEON_CN23XX_PF(octeon_dev)) {
			num_iqueues = octeon_dev->sriov_info.num_pf_rings;
			num_oqueues = octeon_dev->sriov_info.num_pf_rings;
			base_queue = octeon_dev->sriov_info.pf_srn;

			gmx_port_id = octeon_dev->pf_num;
			ifidx_or_pfnum = octeon_dev->pf_num;
		} else {
			num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			base_queue = CFG_GET_BASE_QUE_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			gmx_port_id = CFG_GET_GMXID_NIC_IF(
						octeon_get_conf(octeon_dev), i);
			ifidx_or_pfnum = i;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);
		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		sc->callback = lio_if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = LIO_IFCFG_WAIT_TIME;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
			dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
			goto setup_nic_wait_intr;
		}

		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		/* Verify f/w version (in case of 'auto' loading from flash) */
		fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
		if (memcmp(LIQUIDIO_BASE_VERSION,
			   fw_ver,
			   strlen(LIQUIDIO_BASE_VERSION))) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Unmatched firmware version. Expected %s.x, got %s.\n",
				LIQUIDIO_BASE_VERSION, fw_ver);
			goto setup_nic_dev_fail;
		} else if (atomic_read(octeon_dev->adapter_fw_state) ==
			   FW_IS_PRELOADED) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "Using auto-loaded firmware version %s.\n",
				 fw_ver);
		}

		/* extract micro version field; point past '<maj>.<min>.' */
		micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
		if (kstrtoul(micro_ver, 10, &micro) != 0)
			micro = 0;
		octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
		octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
		octeon_dev->fw_info.ver.rev = micro;

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}

		if (OCTEON_CN6XXX(octeon_dev)) {
			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
								    cn6xxx));
		} else if (OCTEON_CN23XX_PF(octeon_dev)) {
			max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
								    cn23xx_pf));
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues, max_num_queues);
		netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;
		SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);

		retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"setting real number rx failed\n");
			goto setup_nic_dev_fail;
		}

		retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"setting real number tx failed\n");
			goto setup_nic_dev_fail;
		}

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		if (OCTEON_CN23XX_PF(octeon_dev) ||
		    OCTEON_CN6XXX(octeon_dev)) {
			lio->dev_capability = NETIF_F_HIGHDMA
					      | NETIF_F_IP_CSUM
					      | NETIF_F_IPV6_CSUM
					      | NETIF_F_SG | NETIF_F_RXCSUM
					      | NETIF_F_GRO
					      | NETIF_F_TSO | NETIF_F_TSO6
					      | NETIF_F_LRO;
		}
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features = (lio->enc_dev_capability &
					   ~NETIF_F_LRO);

		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
		netdev->hw_features = netdev->hw_features &
			~NETIF_F_HW_VLAN_CTAG_RX;

		/* MTU range: 68 - 16000 */
		netdev->min_mtu = LIO_MIN_MTU_SIZE;
		netdev->max_mtu = LIO_MAX_MTU_SIZE;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
			u8 vfmac[ETH_ALEN];

			eth_random_addr(vfmac);
			if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
				dev_err(&octeon_dev->pci_dev->dev,
					"Error setting VF%d MAC address\n",
					j);
				goto setup_nic_dev_fail;
			}
		}

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */

		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon uses the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (liquidio_setup_io_queues(octeon_dev, i,
					     lio->linfo.num_txpciq,
					     lio->linfo.num_rxpciq)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
			octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
		else
			octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_ENABLE);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		if (setup_link_status_change_wq(netdev))
			goto setup_nic_dev_fail;

		if ((octeon_dev->fw_info.app_cap_flags &
		     LIQUIDIO_TIME_SYNC_CAP) &&
		    setup_sync_octeon_time_wq(netdev))
			goto setup_nic_dev_fail;

		if (setup_rx_oom_poll_fn(netdev))
			goto setup_nic_dev_fail;

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_free_soft_command(octeon_dev, sc);

		if (octeon_dev->subsystem_id ==
			OCTEON_CN2350_25GB_SUBSYS_ID ||
		    octeon_dev->subsystem_id ==
			OCTEON_CN2360_25GB_SUBSYS_ID) {
			cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
					     octeon_dev->fw_info.ver.min,
					     octeon_dev->fw_info.ver.rev);

			/* speed control unsupported in f/w older than 1.7.2 */
			if (cur_ver < OCT_FW_VER(1, 7, 2)) {
				dev_info(&octeon_dev->pci_dev->dev,
					 "speed setting not supported by f/w.");
				octeon_dev->speed_setting = 25;
				octeon_dev->no_speed_setting = 1;
			} else {
				liquidio_get_speed(lio);
			}

			if (octeon_dev->speed_setting == 0) {
				octeon_dev->speed_setting = 25;
				octeon_dev->no_speed_setting = 1;
			}
		} else {
			octeon_dev->no_speed_setting = 1;
			octeon_dev->speed_setting = 10;
		}
		octeon_dev->speed_boot = octeon_dev->speed_setting;
	}

	devlink = devlink_alloc(&liquidio_devlink_ops,
				sizeof(struct lio_devlink_priv));
	if (!devlink) {
		dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
		goto setup_nic_wait_intr;
	}

	lio_devlink = devlink_priv(devlink);
	lio_devlink->oct = octeon_dev;

	if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
		devlink_free(devlink);
		dev_err(&octeon_dev->pci_dev->dev,
			"devlink registration failed\n");
		goto setup_nic_wait_intr;
	}

	octeon_dev->devlink = devlink;
	octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

setup_nic_wait_intr:

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}

	return -ENODEV;
}
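
/* devlink registration happens once per Octeon device, only after every
 * interface has been set up, and the eswitch always starts in
 * DEVLINK_ESWITCH_MODE_LEGACY; switchdev mode is entered later through
 * liquidio_eswitch_mode_set().
 */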

#ifdef CONFIG_PCI_IOV
static int octeon_enable_sriov(struct octeon_device *oct)
{
	unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
	struct pci_dev *vfdev;
	int err;
	u32 u;

	if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
		err = pci_enable_sriov(oct->pci_dev,
				       oct->sriov_info.num_vfs_alloced);
		if (err) {
			dev_err(&oct->pci_dev->dev,
				"OCTEON: Failed to enable PCI sriov: %d\n",
				err);
			oct->sriov_info.num_vfs_alloced = 0;
			return err;
		}
		oct->sriov_info.sriov_enabled = 1;

		/* init lookup table that maps DPI ring number to VF pci_dev
		 * struct pointer
		 */
		u = 0;
		vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
				       OCTEON_CN23XX_VF_VID, NULL);
		while (vfdev) {
			if (vfdev->is_virtfn &&
			    (vfdev->physfn == oct->pci_dev)) {
				oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
					vfdev;
				u += oct->sriov_info.rings_per_vf;
			}
			vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
					       OCTEON_CN23XX_VF_VID, vfdev);
		}
	}

	return num_vfs_alloced;
}
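
/* The lookup table filled above is strided by rings_per_vf: each VF owns
 * a contiguous block of DPI rings, so mapping a ring back to its VF's
 * pci_dev only needs an entry at the first ring of each block.
 */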

static int lio_pci_sriov_disable(struct octeon_device *oct)
{
	int u;

	if (pci_vfs_assigned(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
		return -EPERM;
	}

	pci_disable_sriov(oct->pci_dev);

	u = 0;
	while (u < MAX_POSSIBLE_VFS) {
		oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
		u += oct->sriov_info.rings_per_vf;
	}

	oct->sriov_info.num_vfs_alloced = 0;
	dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
		 oct->pf_num);

	return 0;
}

static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
{
	struct octeon_device *oct = pci_get_drvdata(dev);
	int ret = 0;

	if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
	    (oct->sriov_info.sriov_enabled)) {
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
			 oct->pf_num, num_vfs);
		return 0;
	}

	if (!num_vfs) {
		lio_vf_rep_destroy(oct);
		ret = lio_pci_sriov_disable(oct);
	} else if (num_vfs > oct->sriov_info.max_vfs) {
		dev_err(&oct->pci_dev->dev,
			"OCTEON: Max allowed VFs:%d user requested:%d",
			oct->sriov_info.max_vfs, num_vfs);
		ret = -EPERM;
	} else {
		oct->sriov_info.num_vfs_alloced = num_vfs;
		ret = octeon_enable_sriov(oct);
		dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
			 oct->pf_num, num_vfs);
		ret = lio_vf_rep_create(oct);
		if (ret)
			dev_info(&oct->pci_dev->dev,
				 "vf representor create failed");
	}

	return ret;
}
#endif

/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running.
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* only default iq and oq were initialized
	 * initialize the rest as well
	 */
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	/* Call vf_rep_modinit if the firmware is switchdev capable
	 * and do it from the first liquidio function probed.
	 */
	if (!oct->octeon_id &&
	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
		retval = lio_vf_rep_modinit();
		if (retval) {
			liquidio_stop_nic_module(oct);
			goto octnet_init_failure;
		}
	}

	liquidio_ptp_init(oct);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param work  struct work_struct
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}

static int
octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int i, notice, vf_idx;
	bool cores_crashed;
	u64 *data, vf_num;

	notice = recv_pkt->rh.r.ossp;
	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);

	/* the first 64-bit word of data is the vf_num */
	vf_num = data[0];
	octeon_swap_8B_data(&vf_num, 1);
	vf_idx = (int)vf_num - 1;

	cores_crashed = READ_ONCE(oct->cores_crashed);

	if (notice == VF_DRV_LOADED) {
		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was loaded\n", vf_idx);
			if (!cores_crashed)
				try_module_get(THIS_MODULE);
		}
	} else if (notice == VF_DRV_REMOVED) {
		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was removed\n", vf_idx);
			if (!cores_crashed)
				module_put(THIS_MODULE);
		}
	} else if (notice == VF_DRV_MACADDR_CHANGED) {
		u8 *b = (u8 *)&data[1];

		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
		dev_info(&oct->pci_dev->dev,
			 "VF driver changed VF%d's MAC address to %pM\n",
			 vf_idx, b + 2);
	}

	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}
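
/* The try_module_get()/module_put() pairing above ties the PF module's
 * reference count to loaded VF drivers so the PF cannot be unloaded
 * underneath an active VF; it is deliberately skipped once cores_crashed
 * is set, since a crashed firmware may never deliver the matching remove
 * notice.
 */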

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev  octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	char bootcmd[] = "\n";
	char *dbg_enb = NULL;
	enum lio_fw_state fw_state;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;
	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
	 * since that is what is required for the reference to be removed
	 * during de-initialization (see 'octeon_destroy_resources').
	 */
	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
			       PCI_SLOT(octeon_dev->pci_dev->devfn),
			       PCI_FUNC(octeon_dev->pci_dev->devfn),
			       true);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	/* CN23XX supports preloaded firmware if the following is true:
	 *
	 * The adapter indicates that firmware is currently running AND
	 * 'fw_type' is 'auto'.
	 *
	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
	 */
	if (OCTEON_CN23XX_PF(octeon_dev) &&
	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
		atomic_cmpxchg(octeon_dev->adapter_fw_state,
			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
	}

	/* If loading firmware, only first device of adapter needs to do so. */
	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
				  FW_NEEDS_TO_BE_LOADED,
				  FW_IS_BEING_LOADED);

	/* Here, [local variable] 'fw_state' is set to one of:
	 *
	 *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
	 *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
	 *                          firmware to the adapter.
	 *   FW_IS_BEING_LOADED:    The driver's second instance will not load
	 *                          firmware to the adapter.
	 */

	/* Prior to f/w load, perform a soft reset of the Octeon device;
	 * if error resetting, return w/error.
	 */
	if (fw_state == FW_NEEDS_TO_BE_LOADED)
		if (octeon_dev->fn_list.soft_reset(octeon_dev))
			return 1;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_VF_DRV_NOTICE,
				    octeon_recv_vf_drv_notice, octeon_dev);
	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);

		if (octeon_allocate_ioq_vector
				(octeon_dev,
				 octeon_dev->sriov_info.num_pf_rings)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);

	} else {
		/* The input and output queue registers were setup earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing.*/
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev,
				   octeon_dev->sriov_info.num_pf_rings))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
	 * the output queue is enabled.
	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
	 * before any credits have been issued, causing the ring to be reset
	 * (and the f/w appear to never have started).
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}

		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset.*/
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		/* If console debug enabled, specify empty string to use default
		 * enablement ELSE specify NULL string for 'disabled'.
		 */
		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		} else if (octeon_console_debug_enabled(0)) {
			/* If console was added AND we're logging console output
			 * then set our console print function.
			 */
			octeon_dev->console[0].print = octeon_dbg_console_print;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}

		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	return 0;
}
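
/* Each step above advances octeon_dev->status through the OCT_DEV_*
 * states in order; teardown (octeon_destroy_resources()) walks the same
 * states backwards, which is why every stage records its status before
 * the next stage is allowed to fail.
 */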

/**
 * \brief Debug console print function
 * @param octeon_dev  octeon device
 * @param console_num console number
 * @param prefix      first portion of line to display
 * @param suffix      second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into two
 * parts, in which case they will be passed as the 'prefix' parameter and
 * 'suffix' parameter.
 */
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix)
{
	if (prefix && suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
			 suffix);
	else if (prefix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
	else if (suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);

	return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);