/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2015 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information.
 **********************************************************************/
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
#include <linux/if_vlan.h>
#include <linux/firmware.h>
#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX);
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");

static int ptp_enable = 1;
/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
struct liquidio_if_cfg_context {
	int octeon_id;
	wait_queue_head_t wc;
	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))
union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE)
/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	u64 sg_dma_ptr;
};
/** This structure is used by NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Bytes offset below assume worst-case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8. Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16. Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24. Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-47. Piggybacked soft command, if any */
	struct octeon_soft_command *sc;
};
struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};
struct octeon_device_priv {
	/** Tasklet structures for this device. */
	struct tasklet_struct droq_tasklet;
	unsigned long napi_mask;
};
static int octeon_device_init(struct octeon_device *);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & (1ULL << q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & (1ULL << i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb = NULL;
	struct octeon_soft_command *sc;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	(*pkts_compl)++;
	*bytes_compl += skb->len;
}
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;

	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;

	default:
		return;
	}

	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);
}
int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
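/* Example (illustrative, not from the original source): loading the
 * module with "console_bitmask=0x3" makes this return 1 for consoles
 * 0 and 1, so their debug output is redirected to syslog.
 */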
/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}
/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
	int i, pcount = 0;

	for (i = 0; i < 100; i++) {
		pcount = atomic_read(&oct->response_list
			[OCTEON_ORDERED_SC_LIST].pending_req_count);
		if (pcount)
			schedule_timeout_uninterruptible(HZ / 10);
		else
			break;
	}

	if (pcount)
		return 1;

	return 0;
}
/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & (1ULL << i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}
/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask;        /* Clear corresponding nonfatal bits */
	else
		status &= mask;         /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
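/* Note (added commentary): PCI_ERR_UNCOR_STATUS is write-one-to-clear,
 * so the masking above selects which recorded uncorrectable errors get
 * cleared by the write: only the nonfatal ones while the channel is in
 * pci_channel_io_normal state, only the fatal ones otherwise.
 */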
/**
 * \brief Stop all PCI IO to a given device
 * @param dev Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct->chip);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
				struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}
#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
#endif
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};
static const struct pci_device_id liquidio_pci_tbl[] = {
	{       /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{       /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};

MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);
static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
};
/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}
/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
	return atomic_read(&lio->ifstate) & state_flag;
}
/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}
/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
	atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
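/* Note (added commentary): ifstate_set()/ifstate_reset() perform a
 * non-atomic read-modify-write on lio->ifstate; the atomic_t only makes
 * the individual read and write atomic, so callers are assumed not to
 * race with each other when updating ifstate.
 */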
/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}
/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}
/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				(lio->linfo.num_txpciq)].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}
/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}
/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}
/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}
/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}
/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq = 0;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				(lio->linfo.num_txpciq)].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}
/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}
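/* Note (added commentary): list_delete_head() does no locking itself;
 * the gather lists it operates on are normally serialized by the
 * per-queue lio->glist_lock, except during teardown when no other
 * users remain.
 */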
/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
				list_delete_head(&lio->glist[i]);
			if (g) {
				if (g->sg) {
					dma_unmap_single(&lio->oct_dev->
							 pci_dev->dev,
							 g->sg_dma_ptr,
							 g->sg_size,
							 DMA_TO_DEVICE);
					kfree((void *)((unsigned long)g->sg -
						       g->adjust));
				}
				kfree(g);
			}
		} while (g);
	}

	kfree((void *)lio->glist);
	kfree((void *)lio->glist_lock);
}
/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return 1;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree((void *)lio->glist_lock);
		return 1;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = cpu_to_node(i % num_online_cpus());

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
				      OCT_SG_ENTRY_SIZE);

			g->sg = kmalloc_node(g->sg_size + 8,
					     GFP_KERNEL, numa_node);
			if (!g->sg)
				g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
			if (!g->sg) {
				kfree(g);
				break;
			}

			/* The gather component should be aligned on 64-bit
			 * boundary
			 */
			if (((unsigned long)g->sg) & 7) {
				g->adjust = 8 - (((unsigned long)g->sg) & 7);
				g->sg = (struct octeon_sg_entry *)
					((unsigned long)g->sg + g->adjust);
			}
			g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
						       g->sg, g->sg_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg_dma_ptr)) {
				kfree((void *)((unsigned long)g->sg -
					       g->adjust));
				kfree(g);
				break;
			}

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return 1;
		}
	}

	return 0;
}
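/* Worked example for the alignment fix-up above (illustrative numbers,
 * not from the original source): if kmalloc() returned g->sg ending in
 * 0x...04, then adjust = 8 - 4 = 4 and g->sg moves to 0x...08, an 8-byte
 * boundary. The +8 over-allocation guarantees the adjusted pointer stays
 * inside the buffer, and delete_glists() subtracts g->adjust again to
 * recover the original pointer for kfree().
 */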
/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}
/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			/* start_txq(netdev); */
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}
/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct net_device *netdev;
	struct lio *lio;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

	/*octeon_update_iq_read_idx(oct, iq);*/

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
			netif_wake_subqueue(netdev, iq->q_index);
		}
	} else {
		if (!octnet_iq_is_full(oct, lio->txq)) {
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
						  lio->txq,
						  tx_restart, 1);
			wake_q(netdev, lio->txq);
		}
	}
}
/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static
void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	u64 oq_no;
	struct octeon_droq *droq;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & (1ULL << oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= (1 << oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}
/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct->chip);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct->chip);

	return ret;
}
/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
	int irqret, err;

	err = pci_enable_msi(oct->pci_dev);
	if (err)
		dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
			 err);
	else
		oct->flags |= LIO_FLAG_MSI_ENABLED;

	irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler,
			     IRQF_SHARED, "octeon", oct);
	if (irqret) {
		if (oct->flags & LIO_FLAG_MSI_ENABLED)
			pci_disable_msi(oct->pci_dev);
		dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
			irqret);
		return 1;
	}

	return 0;
}
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
/**
 * \brief Destroy resources associated with octeon device
 * @param pdev PCI device structure
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

	case OCT_DEV_HOST_OK:

	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct->chip);

		/* Release the interrupt line */
		free_irq(oct->pci_dev->irq, oct);

		if (oct->flags & LIO_FLAG_MSI_ENABLED)
			pci_disable_msi(oct->pci_dev);

	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & (1ULL << i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & (1ULL << i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}

	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

	case OCT_DEV_PCI_MAP_DONE:

		/* Soft reset the octeon device before exiting */
		oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

	case OCT_DEV_BEGIN_STATE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* Nothing to be done here either */
		break;
	}                       /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}
/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octnic_ctrl_pkt nctrl;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
	nctrl.ncmd.s.param1 = start_stop;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)lio->netdev;

	if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
}
/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	send_rx_ctrl_cmd(lio, 0);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		txqs_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;
	}

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}
/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < lio->linfo.num_rxpciq; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}
/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}
/**
 * \brief Identify the Octeon device and to map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}
/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
	int q = 0;

	if (netif_is_multiqueue(lio->netdev))
		q = skb->queue_mapping % lio->linfo.num_txpciq;

	return q;
}
/**
 * \brief Check Tx queue state for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */
static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
{
	int q = 0, iq = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		q = skb->queue_mapping;
		iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
	} else {
		iq = lio->txq;
		q = iq;
	}

	if (octnet_iq_is_full(lio->oct_dev, iq))
		return 0;

	if (__netif_subqueue_stopped(lio->netdev, q)) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
		wake_q(lio->netdev, q);
	}
	return 1;
}
/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	check_txq_state(lio, skb);

	tx_buffer_free(skb);
}
/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

	iq = skb_iq(lio, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	check_txq_state(lio, skb); /* mq support: sub-queue state check */

	tx_buffer_free(skb);
}
/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
				g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */

	check_txq_state(lio, skb);
}
/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
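/* Worked example for the adjfreq math above (illustrative numbers, not
 * from the original source): with coproc_clock_rate = 1 GHz the nominal
 * compensation is (10^9 << 32) / 10^9 = 2^32, i.e. 1.0 ns per coprocessor
 * cycle in 32.32 fixed point. Slowing the clock by ppb = 100 computes
 * delta = (100 << 32) / 10^9, about 429, which changes the per-cycle
 * increment by 100 parts per billion.
 */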
/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}
/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * \brief Check if PTP is enabled
 * @param ptp PTP clock info
 * @param rq request
 * @param on is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
		    struct ptp_clock_request *rq __attribute__((unused)),
		    int on __attribute__((unused)))
{
	return -EOPNOTSUPP;
}
/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}
/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
		    sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
		dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
		return ret;
	}

	if (fw_type[0] == '\0')
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
	else
		tmp_fw_type = fw_type;

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}
/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val = 0;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}
	/* tasklet creation for the droq */

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count,
	       oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}
/**
 * \brief Callback for getting interface configuration
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void if_cfg_callback(struct octeon_device *oct,
			    u32 status __attribute__((unused)),
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_resp *resp;
	struct liquidio_if_cfg_context *ctx;

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
			CVM_CAST64(resp->status));
	WRITE_ONCE(ctx->cond, 1);

	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
		 resp->cfg_info.liquidio_firmware_version);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
/**
 * \brief Select queue based on hash
 * @param dev Net device
 * @param skb sk_buff structure
 * @returns selected queue number
 */
static u16 select_q(struct net_device *dev, struct sk_buff *skb,
		    void *accel_priv __attribute__((unused)),
		    select_queue_fallback_t fallback __attribute__((unused)))
{
	u32 qindex = 0;
	struct lio *lio;

	lio = GET_LIO(dev);
	qindex = skb_tx_hash(dev, skb);

	return (u16)(qindex % (lio->linfo.num_txpciq));
}
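/* Note (added commentary): select_q() folds the skb hash into the range
 * 0..num_txpciq-1; skb_iq() and check_txq_state() apply the same modulo,
 * so a packet's queue_mapping always resolves to the same instruction
 * queue.
 */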
/** Routine to push packets arriving on Octeon interface upto network layer.
 * @param oct_id   - octeon device id.
 * @param skbuff   - skbuff struct to be passed to network layer.
 * @param len      - size of total data received.
 * @param rh       - Control header associated with the packet
 * @param param    - additional control data with the packet
 * @param arg      - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct napi_struct *napi = param;
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	u64 ns;
	u16 vtag = 0;
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq = container_of(param, struct octeon_droq,
						napi);
	if (netdev) {
		int packet_was_received;
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
							MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		if (((oct->chip_id == OCTEON_CN66XX) ||
		     (oct->chip_id == OCTEON_CN68XX)) &&
		    ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data), sizeof(ns));
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
				skb_pull(skb, sizeof(ns));
			}
		}

		skb->protocol = eth_type_trans(skb, skb->dev);
		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (rh->r_dh.vlan != 0)) {
			u16 vid = rh->r_dh.vlan;
			u16 priority = rh->r_dh.priority;

			vtag = priority << 13 | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;

		if (packet_was_received) {
			droq->stats.rx_bytes_received += len;
			droq->stats.rx_pkts_received++;
			netdev->last_rx = jiffies;
		} else {
			droq->stats.rx_dropped++;
			netif_info(lio, rx_err, lio->netdev,
				   "droq:%d error rx_dropped:%llu\n",
				   droq->q_no, droq->stats.rx_dropped);
		}

	} else {
		recv_buffer_free(skb);
	}
}
/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}
/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	if (droq->cpu_id == this_cpu) {
		napi_schedule(&droq->napi);
	} else {
		struct call_single_data *csd = &droq->csd;

		csd->func = napi_schedule_wrapper;
		csd->info = &droq->napi;
		csd->flags = 0;

		smp_call_function_single_async(droq->cpu_id, csd);
	}
}
/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_droq *droq;
	int work_done;
	int tx_done = 0, iq_no;
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
						 POLL_EVENT_PROCESS_PKTS,
						 budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* Process iq buffers with in the budget limits */
		tx_done = octeon_flush_iq(oct, iq, 1, budget);
		/* Update iq read-index rather than waiting for next interrupt.
		 * Return back if tx_done is false.
		 */
		update_txq_status(oct, iq_no);
		/*tx_done = (iq->flush_index == iq->octeon_read_index);*/
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

	if ((work_done < budget) && (tx_done)) {
		napi_complete(napi);
		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
					     POLL_EVENT_ENABLE_INTR, 0);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}
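/* Note on the return value above (added commentary): NAPI keeps polling
 * as long as the driver returns the full budget. Returning budget while
 * tx_done is false forces another poll pass to finish flushing the IQ,
 * while (work_done < budget && tx_done) lets the handler complete NAPI
 * and re-enable interrupts.
 */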
/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface Index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
static inline int setup_io_queues(struct octeon_device *octeon_dev,
				  int ifidx)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	static int cpu_id;
	static int cpu_id_modulus;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int q, q_no, retval = 0;
	struct lio *lio;
	int num_tx_descs;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = (void *)netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
			q, q_no);
		retval = octeon_setup_droq(octeon_dev, q_no,
					   CFG_GET_NUM_RX_DESCS_NIC_IF
						   (octeon_get_conf(octeon_dev),
						   lio->ifidx),
					   CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
						   (octeon_get_conf(octeon_dev),
						   lio->ifidx), NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	/* set up IQs. */
	for (q = 0; q < lio->linfo.num_txpciq; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
							   (octeon_dev),
							   lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}
	}

	return 0;
}
/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline void setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
	destroy_workqueue(lio->txq_status_wq.wq);
}
/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;
	}

	oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	setup_tx_poll_fn(netdev);

	start_txq(netdev);

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	/* Ready for link status updates */
	lio->intf_open = 1;

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}
/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	netif_tx_disable(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Pause for a moment and wait for Octeon to flush out (to the wire) any
	 * egress packets that are in-flight.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(100));

	/* Now it should be safe to tell Octeon that nic interface is down. */
	send_rx_ctrl_cmd(lio, 0);

	cleanup_tx_poll_fn(netdev);

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}
/** \brief Net device link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		netif_info(lio, probe, lio->netdev,
			   "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
			   "MACAddr changed to", mac[0], mac[1],
			   mac[2], mac[3], mac[4], mac[5]);
		break;

	case OCTNET_CMD_CHANGE_MTU:
		/* If command is successful, change the MTU. */
		netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
			   netdev->mtu, nctrl->ncmd.s.param1);
		dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
			 netdev->name, netdev->mtu,
			 nctrl->ncmd.s.param1);
		netdev->mtu = nctrl->ncmd.s.param1;
		call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_ENABLE_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);
		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "%s RX Checksum Offload Enabled\n",
				   netdev->name);
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "%s RX Checksum Offload Disabled\n",
				   netdev->name);
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "%s TX Checksum Offload Enabled\n",
				   netdev->name);
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "%s TX Checksum Offload Disabled\n",
				   netdev->name);
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "%s VxLAN Destination UDP PORT:%d ADDED\n",
				   netdev->name,
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "%s VxLAN Destination UDP PORT:%d DELETED\n",
				   netdev->name,
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}

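/* The ndo handlers below all talk to the firmware with the same
 * fire-and-forget pattern: fill an octnic_ctrl_pkt, point cb_fn at
 * liquidio_link_ctrl_cmd_completion() above, and post it with
 * octnet_send_nic_ctrl_pkt(). A minimal sketch (the command and parameter
 * values are illustrative only):
 *
 *	struct octnic_ctrl_pkt nctrl;
 *
 *	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
 *	nctrl.ncmd.s.cmd = OCTNET_CMD_LRO_ENABLE;	// opcode
 *	nctrl.ncmd.s.param1 = OCTNIC_LROIPV4;		// argument
 *	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;	// post on first IQ
 *	nctrl.netpndev = (u64)netdev;			// echoed back to cb
 *	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
 *	nctrl.wait_time = 100;
 *	octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
 */
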
/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}

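/* Example: a netdev with IFF_MULTICAST and IFF_BROADCAST set and fewer
 * subscribed groups than MAX_OCTEON_MULTICAST_ADDR yields
 * UNICAST | MULTICAST | BROADCAST; overflowing the multicast table
 * additionally sets OCTNET_IFFLAG_ALLMULTI, so the firmware stops
 * filtering and accepts all multicast traffic.
 */
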
/** \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}

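/* Layout assumed by the copy loop above: each 8-byte udd[] slot carries one
 * MAC address in its low six bytes (offset 2), matching the firmware's
 * big-endian view of a 64-bit word, e.g. for 00:01:02:03:04:05:
 *
 *	udd[n] bytes:	00 00 | 00 01 02 03 04 05
 *			pad     MAC in network byte order
 */
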
/** \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}

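/* The same offset-2 packing applies here: the new station address travels
 * in udd[0], and lio->linfo.hw_addr is patched at the same offset so the
 * host copy stays consistent with what the firmware reported at init time.
 */
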
/** \brief Net device get_stats
 * @param netdev network device
 */
static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct net_device_stats *stats = &netdev->stats;
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < lio->linfo.num_rxpciq; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	stats->rx_bytes = bytes;
	stats->rx_packets = pkts;
	stats->rx_dropped = drop;

	return stats;
}

/** \brief Net device change_mtu
 * @param netdev network device
 */
static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	/* Limit the MTU to make sure the ethernet packets are between 68 bytes
	 * and 16000 bytes
	 */
	if ((new_mtu < LIO_MIN_MTU_SIZE) ||
	    (new_mtu > LIO_MAX_MTU_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
		dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
			LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
		return -EINVAL;
	}

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nctrl.ncmd.s.param1 = new_mtu;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
		return -1;
	}

	lio->mtu = new_mtu;

	return 0;
}

/** \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}

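/* For reference, user space reaches the handler above through the standard
 * SIOCSHWTSTAMP ioctl. A minimal sketch (error handling omitted, "eth0" is
 * just an example interface name):
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_ALL,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *
 * Any PTP rx_filter is coerced to HWTSTAMP_FILTER_ALL above, and the
 * possibly modified config is copied back so the caller can see that.
 */
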
/** \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * \brief handle a Tx timestamp response
 * @param oct octeon device
 * @param status response status
 * @param buf pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/* \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 * @param xmit_more more packets coming?
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo,
					 int xmit_more)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = !xmit_more;
	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}

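/* The reqtype rewrite above (NORESP -> RESP) is what routes the completion
 * back through handle_timestamp(): response-type requests carry a soft
 * command whose callback fires when the firmware returns the wire
 * timestamp, whereas the normal no-response path frees the skb without
 * ever seeing one.
 */
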
/** \brief Transmit network packets to the Octeon interface
 * @param skb skbuff struct to be passed to network layer.
 * @param netdev pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *          (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct lio *lio;
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_device *oct;
	struct oct_iq_stats *stats;
	struct octeon_instr_irh *irh;
	union tx_info *tx_info;
	int status = 0;
	int q_idx = 0, iq_no = 0;
	int xmit_more, j;
	u64 dptr = 0;
	u32 tag = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	if (netif_is_multiqueue(netdev)) {
		q_idx = skb->queue_mapping;
		q_idx = (q_idx % (lio->linfo.num_txpciq));
		tag = q_idx;
		iq_no = lio->linfo.txpciq[q_idx].s.q_no;
	} else {
		iq_no = lio->txq;
	}

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) ||
	    (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev,
			   "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = (void *)finfo;

	ndata.q_no = iq_no;

	if (netif_is_multiqueue(netdev)) {
		if (octnet_iq_is_full(oct, ndata.q_no)) {
			/* defer sending if queue is full */
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   ndata.q_no);
			stats->tx_iq_busy++;
			return NETDEV_TX_BUSY;
		}
	} else {
		if (octnet_iq_is_full(oct, lio->txq)) {
			/* defer sending if queue is full */
			stats->tx_iq_busy++;
			netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
				   lio->txq);
			return NETDEV_TX_BUSY;
		}
	}
	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
	 */

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (skb_shinfo(skb)->nr_frags == 0) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		/* Offload checksum calculation for TCP/UDP packets */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			return NETDEV_TX_BUSY;
		}

		ndata.cmd.cmd2.dptr = dptr;

		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		int i, frags;
		struct skb_frag_struct *frag;
		struct octnic_gather *g;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				dma_map_page(&oct->pci_dev->dev,
					     frag->page.p,
					     frag->page_offset,
					     frag->size,
					     DMA_TO_DEVICE);

			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       frag->size,
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
			i++;
		}

		dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
					   g->sg_size, DMA_TO_DEVICE);
		dptr = g->sg_dma_ptr;

		ndata.cmd.cmd2.dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
	tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> 13;
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}

	xmit_more = skb->xmit_more;

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP)
		stop_q(lio->netdev, q_idx);

	netif_trans_update(netdev);

	if (skb_shinfo(skb)->gso_size)
		stats->tx_done += skb_shinfo(skb)->gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += skb->len;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);
	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}

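/* Gather-list indexing used in liquidio_xmit() above: each sg entry packs
 * four buffer pointers plus their sizes, so fragment i lands in entry
 * (i >> 2), slot (i & 3), with slot 0 of entry 0 always holding the linear
 * part of the skb. Illustrative layout for a six-fragment skb:
 *
 *	sg[0]: linear  frag1  frag2  frag3
 *	sg[1]: frag4   frag5  frag6  -
 */
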
/** \brief Network device Tx timeout
 * @param netdev pointer to network device
 */
static void liquidio_tx_timeout(struct net_device *netdev)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	txqs_wake(netdev);
}

static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
				    __be16 proto __attribute__((unused)),
				    u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}

static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
				     __be16 proto __attribute__((unused)),
				     u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
	}

	return ret;
}

/** Sending command to enable/disable RX checksum offload
 * @param netdev      pointer to network device
 * @param command     OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd      OCTNET_CMD_RXCSUM_ENABLE/
 *                    OCTNET_CMD_RXCSUM_DISABLE
 * @returns           SUCCESS or FAILURE
 */
int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			ret);
	}
	return ret;
}

/** Sending command to add/delete VxLAN UDP port to firmware
 * @param netdev        pointer to network device
 * @param command       OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port    VxLAN port to be added or deleted
 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
 *                      OCTNET_CMD_VXLAN_PORT_DEL
 * @returns             SUCCESS or FAILURE
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev,
			"VxLAN port add/delete failed in core (ret:0x%x)\n",
			ret);
	}
	return ret;
}

int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
			ret);
	}
	return ret;
}

/** \brief Net device fix features
 * @param netdev  pointer to network device
 * @param request features requested
 * @returns updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	return request;
}

/** \brief Net device set features
 * @param netdev   pointer to network device
 * @param features features to enable/disable
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if (!((netdev->features ^ features) & NETIF_F_LRO))
		return 0;

	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

	/* Sending command to firmware to enable/disable RX checksum
	 * offload settings using ethtool
	 */
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev,
					    OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	return 0;
}

static void liquidio_add_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_ADD);
}

static void liquidio_del_vxlan_port(struct net_device *netdev,
				    struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	liquidio_vxlan_port_command(netdev,
				    OCTNET_CMD_VXLAN_PORT_CONFIG,
				    htons(ti->port),
				    OCTNET_CMD_VXLAN_PORT_DEL);
}

static struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats		= liquidio_get_stats,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,
	.ndo_vlan_rx_add_vid	= liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= liquidio_add_vxlan_port,
	.ndo_udp_tunnel_del	= liquidio_del_vxlan_port,
};

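/* lionetdevops is deliberately non-const: setup_nic_devices() patches in
 * .ndo_select_queue = select_q at probe time when an interface is given
 * more than one input queue, so the ops table must remain writable.
 */
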
/** \brief Entry point for the liquidio module
 */
static int __init liquidio_init(void)
{
	int i;
	struct handshake *hs;

	init_completion(&first_stage);

	octeon_init_device_list(conf_type);

	if (liquidio_init_pci())
		return -EINVAL;

	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion(&hs->init);
			if (!hs->init_ok) {
				/* init handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Failed to init device\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion_timeout(&hs->started,
						    msecs_to_jiffies(30000));
			if (!hs->started_ok) {
				/* starter handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Firmware failed to start\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	return 0;
}

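/* Module bring-up is a two-stage handshake per device: octeon_device_init()
 * completes handshake[].init once firmware is loaded, and nic_starter()
 * completes handshake[].started once the NIC application reports in. The
 * 30-second timeout above bounds the second stage, so a device that never
 * reports CVM_DRV_NIC_APP fails the module load instead of blocking it
 * indefinitely.
 */
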
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int gmxport = 0;
	union oct_link_status *ls;
	int i;

	if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

/**
 * \brief Setup network interfaces
 * @param octeon_dev octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, ctx_size;
	u32 ifidx_or_pfnum;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		ctx_size = sizeof(struct liquidio_if_cfg_context);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, 0,
						  resp_size, ctx_size);
		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

		num_iqueues =
			CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
		num_oqueues =
			CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i);
		base_queue =
			CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
		gmx_port_id =
			CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
		ifidx_or_pfnum = i;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"requesting config for interface %d, iqs %d, oqs %d\n",
			ifidx_or_pfnum, num_iqueues, num_oqueues);
		WRITE_ONCE(ctx->cond, 0);
		ctx->octeon_id = lio_get_device_id(octeon_dev);
		init_waitqueue_head(&ctx->wc);

		if_cfg.u64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
					    OPCODE_NIC_IF_CFG, 0,
					    if_cfg.u64, 0);

		sc->callback = if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = octeon_send_soft_command(octeon_dev, sc);
		if (retval == IQ_SEND_FAILED) {
			dev_err(&octeon_dev->pci_dev->dev,
				"iq/oq config failed status: %x\n",
				retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		sleep_cond(&ctx->wc, &ctx->cond);
		retval = resp->status;
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		octeon_swap_8B_data((u64 *)(&resp->cfg_info),
				    (sizeof(struct liquidio_if_cfg_info)) >> 3);

		num_iqueues = hweight64(resp->cfg_info.iqmask);
		num_oqueues = hweight64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
				resp->cfg_info.iqmask,
				resp->cfg_info.oqmask);
			goto setup_nic_dev_fail;
		}
		dev_dbg(&octeon_dev->pci_dev->dev,
			"interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
			num_iqueues, num_oqueues);
		netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);

		if (!netdev) {
			dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);

		if (num_iqueues > 1)
			lionetdevops.ndo_select_queue = select_q;

		/* Associate the routines that will handle different
		 * netdev tasks.
		 */
		netdev->netdev_ops = &lionetdevops;

		lio = GET_LIO(netdev);

		memset(lio, 0, sizeof(struct lio));

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props[i];
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->netdev = netdev;

		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].u64 =
				resp->cfg_info.linfo.rxpciq[j].u64;
		}
		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].u64 =
				resp->cfg_info.linfo.txpciq[j].u64;
		}
		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;

		lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

		lio->dev_capability = NETIF_F_HIGHDMA
				      | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				      | NETIF_F_SG | NETIF_F_RXCSUM
				      | NETIF_F_GRO
				      | NETIF_F_TSO | NETIF_F_TSO6
				      | NETIF_F_LRO;
		netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);

		/* Copy of transmit encapsulation capabilities:
		 * TSO, TSO6, Checksums for this device
		 */
		lio->enc_dev_capability = NETIF_F_IP_CSUM
					  | NETIF_F_IPV6_CSUM
					  | NETIF_F_GSO_UDP_TUNNEL
					  | NETIF_F_HW_CSUM | NETIF_F_SG
					  | NETIF_F_RXCSUM
					  | NETIF_F_TSO | NETIF_F_TSO6
					  | NETIF_F_LRO;

		netdev->hw_enc_features = (lio->enc_dev_capability &
					   ~NETIF_F_LRO);

		lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;

		netdev->vlan_features = lio->dev_capability;
		/* Add any unchangeable hw features */
		lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
				       NETIF_F_HW_VLAN_CTAG_RX |
				       NETIF_F_HW_VLAN_CTAG_TX;

		netdev->features = (lio->dev_capability & ~NETIF_F_LRO);

		netdev->hw_features = lio->dev_capability;
		/* HW_VLAN_RX and HW_VLAN_FILTER is always on */
		netdev->hw_features = netdev->hw_features &
			~NETIF_F_HW_VLAN_CTAG_RX;

		/* Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->octprops = props;
		lio->netdev = netdev;

		dev_dbg(&octeon_dev->pci_dev->dev,
			"if%d gmx: %d hw_addr: 0x%llx\n", i,
			lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));

		/* 64-bit swap required on LE machines */
		octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
		for (j = 0; j < 6; j++)
			mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));

		/* Copy MAC Address to OS network device structure */

		ether_addr_copy(netdev->dev_addr, mac);

		/* By default all interfaces on a single Octeon uses the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (setup_io_queues(octeon_dev, i)) {
			dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);

		if (setup_glists(octeon_dev, lio, num_iqueues)) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Register ethtool support */
		liquidio_set_ethtool_ops(netdev);
		octeon_dev->priv_flags = 0x0;

		if (netdev->features & NETIF_F_LRO)
			liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
					     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

		liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);

		if ((debug != -1) && (debug & NETIF_MSG_HW))
			liquidio_set_feature(netdev,
					     OCTNET_CMD_VERBOSE_ENABLE, 0);

		/* Register the network device with the OS */
		if (register_netdev(netdev)) {
			dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
			goto setup_nic_dev_fail;
		}

		dev_dbg(&octeon_dev->pci_dev->dev,
			"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		netif_carrier_off(netdev);
		lio->link_changes++;

		ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/* Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
		liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
				     OCTNET_CMD_TXCSUM_ENABLE);

		dev_dbg(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup successful\n", i);

		octeon_free_soft_command(octeon_dev, sc);
	}

	return 0;

setup_nic_dev_fail:

	octeon_free_soft_command(octeon_dev, sc);

	while (i--) {
		dev_err(&octeon_dev->pci_dev->dev,
			"NIC ifidx:%d Setup failed\n", i);
		liquidio_destroy_nic_device(octeon_dev, i);
	}
	return -ENODEV;
}

/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running.
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	struct oct_intrmod_cfg *intrmod_cfg;
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* only default iq and oq were initialized
	 * initialize the rest as well
	 */
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0,
	       sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	liquidio_ptp_init(oct);

	/* Initialize interrupt moderation params */
	intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
	intrmod_cfg->rx_enable = 1;
	intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
	intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
	intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
	intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
	intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
	intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
	intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
	intrmod_cfg->tx_enable = 1;
	intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
	intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
	intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
	intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param work work_struct embedded in the device's cavium_wk
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}

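/* nic_starter() re-arms itself every LIQUIDIO_STARTER_POLL_INTERVAL_MS
 * (100 ms) until the core application reaches OCT_DEV_CORE_OK, so the
 * transition to OCT_DEV_RUNNING happens asynchronously without blocking
 * the probe path.
 */
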
/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	char bootcmd[] = "\n";
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;
	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	/* Do a soft reset of the Octeon device. */
	if (octeon_dev->fn_list.soft_reset(octeon_dev))
		return 1;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	octeon_set_io_queues_off(octeon_dev);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		/* On error, release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_iqs; j++)
			octeon_delete_instr_queue(octeon_dev, j);
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize soft command buffer pool
	 */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		/* Release any previously allocated queues */
		for (j = 0; j < octeon_dev->num_oqs; j++)
			octeon_delete_droq(octeon_dev, j);
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	/* The input and output queue registers were setup earlier (the queues
	 * were not enabled). Any additional registers that need to be
	 * programmed should be done now.
	 */
	ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev,
			"Failed to configure device registers\n");
		return ret;
	}

	/* Initialize the tasklet that handles output queue packet processing.*/
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address
	 */
	if (octeon_setup_interrupt(octeon_dev))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);

	/* Enable the input and output queues for this Octeon device */
	octeon_dev->fn_list.enable_io_queues(octeon_dev);

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");

	if (ddr_timeout == 0)
		dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");

	schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

	/* Wait for the octeon to initialize DDR after the soft-reset. */
	while (ddr_timeout == 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (schedule_timeout(HZ / 10)) {
			/* user probably pressed Control-C */
			return 1;
		}
	}
	ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev,
			"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
			ret);
		return 1;
	}

	if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) {
		dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
		return 1;
	}

	/* Divert uboot to take commands from host instead. */
	ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
	ret = octeon_init_consoles(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
		return 1;
	}
	ret = octeon_add_console(octeon_dev, 0);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

	dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
	ret = load_firmware(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
		return 1;
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	/* Send Credit for Octeon Output queues. Credits are always sent after
	 * the output queue is enabled.
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Packets can start arriving on the output queues from this point. */

	return 0;
}

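/* octeon_device_init() is effectively a state machine: each successful step
 * advances oct->status, so teardown paths elsewhere in the driver can
 * unwind exactly as far as initialization got:
 *
 *	BEGIN_STATE -> PCI_MAP_DONE -> DISPATCH_INIT_DONE
 *	  -> INSTR_QUEUE_INIT_DONE -> SC_BUFF_POOL_INIT_DONE
 *	  -> RESP_LIST_INIT_DONE -> DROQ_INIT_DONE -> IO_QUEUES_DONE
 *	  -> CONSOLE_INIT_DONE -> HOST_OK
 */
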
/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);