/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"
/*
 * Generic information about the driver.
 */
#define DRV_VERSION "1.0.0"
#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver"
/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
		 "default adapter ethtool message level bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI.  This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX	2
#define MSI_MSI		1
#define MSI_DEFAULT	MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
/*
 * Fundamental constants.
 * ======================
 */

enum {
	MAX_TXQ_ENTRIES		= 16384,
	MAX_RSPQ_ENTRIES	= 16384,
	MAX_RX_BUFFERS		= 16384,

	MIN_TXQ_ENTRIES		= 32,
	MIN_RSPQ_ENTRIES	= 128,
	/* ... */

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queues
	 * indices are all in units of Egress Context Units bytes, and free
	 * list entries are 64-bit PCI DMA addresses.  And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused.  See sge.c for more details ...
	 */
	EQ_UNIT		= SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT	= EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID	= FL_PER_EQ_UNIT,
};
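/*
 * Worked example of the Free List sizing above (a sketch; assumes the T4
 * Egress Context Unit, SGE_EQ_IDXSIZE, is 64 bytes): EQ_UNIT = 64 and each
 * Free List entry is an 8-byte DMA address, so FL_PER_EQ_UNIT = 64/8 = 8.
 * Because Producer Index == Consumer Index means "empty", one full Egress
 * Unit's worth of entries (MIN_FL_RESID = 8) is always left unused, which is
 * why Free List sizes below are reported and accepted net of MIN_FL_RESID.
 */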
/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;
/*
 * OS "Callback" functions.
 * ========================
 */

/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
	struct net_device *dev = adapter->port[pidx];

	/*
	 * If the port is disabled or the current recorded "link up"
	 * status matches the new status, just return.
	 */
	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
		return;

	/*
	 * Tell the OS that the link status has changed and print a short
	 * informative message on the console about the event.
	 */
	if (link_ok) {
		const struct port_info *pi = netdev_priv(dev);

		netif_carrier_on(dev);

		switch (pi->link_cfg.speed) {
		/* ... map the link speed to a display string ... */
		}

		switch (pi->link_cfg.fc) {
		/* ... */
		case PAUSE_RX|PAUSE_TX:
			/* ... */
			break;
		}

		printk(KERN_INFO "%s: link up, %s, full-duplex, %s PAUSE\n",
		       /* ... */);
	} else {
		netif_carrier_off(dev);
		printk(KERN_INFO "%s: link down\n", dev->name);
	}
}
/*
 * Net device operations.
 * ======================
 */

/*
 * Record our new VLAN Group and enable/disable hardware VLAN Tag extraction
 * based on whether the specified VLAN Group pointer is NULL or not.
 */
static void cxgb4vf_vlan_rx_register(struct net_device *dev,
				     struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1, grp != NULL, 0);
}
/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, -1,
			      true);
	if (ret == 0) {
		ret = t4vf_change_mac(pi->adapter, pi->viid,
				      pi->xact_addr_filt, dev->dev_addr, true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
	return ret;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
	int pidx;

	/*
	 * Firmware events.
	 */
	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
		 "%s-FWeventq", adapter->name);
	adapter->msix_info[MSIX_FW].desc[namelen] = 0;

	/*
	 * Ethernet queues.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
		}
	}
}
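/*
 * For illustration (names are hypothetical): with adapter->name
 * "0000:81:00.4" and a two-queue port "eth2", the vectors named above show
 * up as "0000:81:00.4-FWeventq", "eth2-0" and "eth2-1" in /proc/interrupts.
 */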
/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}
/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}
/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts ...
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC(0) |
		     SEINTARM(rspq->intr_params) |
		     INGRESSQID(rspq->cntxt_id));
}
/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.
	 */
	if (adapter->flags & USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC(0) |
			     SEINTARM(s->intrq.intr_params) |
			     INGRESSQID(s->intrq.cntxt_id));
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}
/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;
		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message.  We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = (void *)cpl;
		unsigned int qid = EGR_QID(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID.  None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}
/*
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initializes them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector.  The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets.  These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists.  This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}
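/*
 * A sketch of the reverse-mapping arithmetic above with made-up IDs: if the
 * first TX queue's absolute ID is 24 and its context ID is 8, then egr_base
 * is 16, so a Free List with context ID 9 gets the synthesized absolute ID
 * 9 + 16 = 25 and can be looked up through EQ_MAP() just like any other
 * Egress Queue.
 */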
/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues.  We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface).  We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed.  We'll use our first ingress queue.
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;

				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}
/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & USING_MSIX)
			name_msix_vecs(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
	if (adapter->flags & USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);
	return 0;
}
/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) closed.  (Note that this routine is called "cxgb_down" in the PF
 * Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}
/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/*
	 * Note that this interface is up and start everything up ...
	 */
	netif_set_real_num_tx_queues(dev, pi->nqsets);
	err = netif_set_real_num_rx_queues(dev, pi->nqsets);
	if (err)
		goto err_unwind;
	err = link_start(dev);
	if (err)
		goto err_unwind;

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}
/*
 * Shut down a net device.  This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	t4vf_enable_vi(adapter, pi->viid, false, false);
	pi->link_cfg.link_ok = 0;

	clear_bit(pi->port_id, &adapter->open_device_map);
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return 0;
}
/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}
/*
 * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
 * at a specified offset within the list, into an array of address pointers
 * and return the number collected.
 */
static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
							const u8 **addr,
							unsigned int offset,
							unsigned int maxaddrs)
{
	unsigned int index = 0;
	unsigned int naddr = 0;
	const struct netdev_hw_addr *ha;

	for_each_dev_addr(dev, ha)
		if (index++ >= offset) {
			addr[naddr++] = ha->addr;
			if (naddr >= maxaddrs)
				break;
		}
	return naddr;
}
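/*
 * Callers (see set_addr_filters() below) invoke these collectors repeatedly
 * with an increasing offset to walk the list in fixed-size chunks: the first
 * pass with offset 0 gathers up to maxaddrs entries, the next pass resumes
 * at offset naddr, and a return of 0 signals the end of the list.
 */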
/*
 * Collect up to maxaddrs worth of a netdevice's multicast addresses,
 * starting at a specified offset within the list, into an array of address
 * pointers and return the number collected.
 */
static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
							const u8 **addr,
							unsigned int offset,
							unsigned int maxaddrs)
{
	unsigned int index = 0;
	unsigned int naddr = 0;
	const struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		if (index++ >= offset) {
			addr[naddr++] = ha->addr;
			if (naddr >= maxaddrs)
				break;
		}
	return naddr;
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	unsigned int offset, naddr;
	const u8 *addr[7];
	int ret;
	const struct port_info *pi = netdev_priv(dev);

	/* first do the secondary unicast addresses */
	for (offset = 0; ; offset += naddr) {
		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
						     ARRAY_SIZE(addr));
		if (naddr == 0)
			break;

		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
					  naddr, addr, NULL, &uhash, sleep);
		if (ret < 0)
			return ret;

		free = false;
	}

	/* next set up the multicast addresses */
	for (offset = 0; ; offset += naddr) {
		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
						     ARRAY_SIZE(addr));
		if (naddr == 0)
			break;

		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
					  naddr, addr, NULL, &mhash, sleep);
		if (ret < 0)
			return ret;
		free = false;
	}

	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
				  uhash | mhash, sleep);
}
/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4vf_set_rxmode(pi->adapter, pi->viid, -1,
				      (dev->flags & IFF_PROMISC) != 0,
				      (dev->flags & IFF_ALLMULTI) != 0,
				      1, -1, sleep_ok);
	return ret;
}
/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}
/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
	int i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		int delta = us - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			timer_idx = i;
		}
	}
	return timer_idx;
}
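/*
 * Worked example (illustrative values): if s->timer_val[] held
 * {1, 5, 10, 50, 100, 200} microseconds, closest_timer(s, 7) would compare
 * |7-1| = 6, |7-5| = 2, |7-10| = 3, ... and return index 1 (the 5us timer).
 * closest_thres() below applies the same nearest-match search to the packet
 * count thresholds in s->counter_val[].
 */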
static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			pktcnt_idx = i;
		}
	}
	return pktcnt_idx;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
			       const struct sge_rspq *rspq)
{
	unsigned int timer_idx = QINTR_TIMER_IDX_GET(rspq->intr_params);

	return timer_idx < SGE_NTIMERS
		? adapter->sge.timer_val[timer_idx]
		: 0;
}
/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adapter: the adapter
 *	@rspq: the RX response queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an RX response queue's interrupt hold-off time and packet count.
 *	At least one of the two needs to be enabled for the queue to generate
 *	interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that.  If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
			     (cnt > 0 ? QINTR_CNT_EN : 0));
	return 0;
}
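/*
 * For instance, set_rxq_intr_params(adapter, rspq, 5, 8) asks for roughly a
 * 5us holdoff and an 8-packet threshold; both values are snapped to the
 * nearest entries the hardware actually supports.  The ethtool set_coalesce
 * handler below funnels into this routine.
 */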
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version 4, revision 0x3f (cxgb4vf).
	 */
	return 4 | (0x3f << 10);
}
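/*
 * Arithmetic check of the encoding above: 0x3f << 10 = 0xfc00, OR'd with
 * chip version 4 gives 0xfc04 -- the value reported as the ethtool register
 * dump version for every cxgb4vf device (see cxgb4vf_get_regs() below).
 */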
/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	    /*
	     * The VF Driver doesn't have access to any of the other
	     * common Ethernet device ioctl()'s (like reading/writing
	     * PHY registers, etc.) ...
	     */
	default:
		return -EOPNOTSUPP;
	}
}
/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/* accommodate SACK */
	if (new_mtu < 81)
		return -EINVAL;

	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
			      -1, -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}
/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
	int ret;
	struct sockaddr *addr = _addr;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4vf_change_mac(pi->adapter, pi->viid, pi->xact_addr_filt,
			      addr->sa_data, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	/* on success t4vf_change_mac() returns the index of the exact-match
	 * MAC filter it used; cache it for the next address change */
	pi->xact_addr_filt = ret;
	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues.  This is called outside of normal interrupt
 * context.
 */
static void cxgb4vf_poll_controller(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (adapter->flags & USING_MSIX) {
		struct sge_eth_rxq *rxq;
		int nqsets;

		rxq = &adapter->sge.ethrxq[pi->first_qset];
		for (nqsets = pi->nqsets; nqsets; nqsets--) {
			t4vf_sge_intr_msix(0, &rxq->rspq);
			rxq++;
		}
	} else
		t4vf_intr_handler(adapter)(0, adapter);
}
#endif
/*
 * Ethtool operations.
 * ===================
 *
 * Note that we don't support any ethtool operations which change the physical
 * state of the port to which we're linked.
 */
/*
 * Return current port link settings.
 */
static int cxgb4vf_get_settings(struct net_device *dev,
				struct ethtool_cmd *cmd)
{
	const struct port_info *pi = netdev_priv(dev);

	cmd->supported = pi->link_cfg.supported;
	cmd->advertising = pi->link_cfg.advertising;
	cmd->speed = netif_carrier_ok(dev) ? pi->link_cfg.speed : -1;
	cmd->duplex = DUPLEX_FULL;

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = pi->port_id;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = pi->link_cfg.autoneg;
	return 0;
}
/*
 * Return our driver information.
 */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *drvinfo)
{
	struct adapter *adapter = netdev2adap(dev);

	strcpy(drvinfo->driver, KBUILD_MODNAME);
	strcpy(drvinfo->version, DRV_VERSION);
	strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
		 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.fwrev),
		 FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MINOR_GET(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_MICRO_GET(adapter->params.dev.tprev),
		 FW_HDR_FW_VER_BUILD_GET(adapter->params.dev.tprev));
}
/*
 * Return current adapter message level.
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
	netdev2adap(dev)->msg_enable = msglevel;
}
/*
 * Return the device's current Queue Set ring size parameters along with the
 * allowed maximum values.  Since ethtool doesn't understand the concept of
 * multi-queue devices, we just return the current values associated with the
 * first Queue Set.
 */
static void cxgb4vf_get_ringparam(struct net_device *dev,
				  struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	rp->rx_max_pending = MAX_RX_BUFFERS;
	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	rp->rx_jumbo_max_pending = 0;
	rp->tx_max_pending = MAX_TXQ_ENTRIES;

	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	rp->rx_jumbo_pending = 0;
	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
/*
 * Set the Queue Set ring size parameters for the device.  Again, since
 * ethtool doesn't allow for the concept of multiple queues per device, we'll
 * apply these new values across all of the Queue Sets associated with the
 * device -- after vetting them of course!
 */
static int cxgb4vf_set_ringparam(struct net_device *dev,
				 struct ethtool_ringparam *rp)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;
	int qs;

	if (rp->rx_pending > MAX_RX_BUFFERS ||
	    rp->rx_jumbo_pending ||
	    rp->tx_pending > MAX_TXQ_ENTRIES ||
	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    rp->rx_pending < MIN_FL_ENTRIES ||
	    rp->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
		s->ethtxq[qs].q.size = rp->tx_pending;
	}
	return 0;
}
/*
 * Return the interrupt holdoff timer and count for the first Queue Set on the
 * device.  Our extension ioctl() (the cxgbtool interface) allows the
 * interrupt holdoff timer to be read on all of the device's Queue Sets.
 */
static int cxgb4vf_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adapter = pi->adapter;
	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;

	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
	coalesce->rx_max_coalesced_frames =
		((rspq->intr_params & QINTR_CNT_EN)
		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
		 : 0);
	return 0;
}
/*
 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
 * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
 * the interrupt holdoff timer on any of the device's Queue Sets.
 */
static int cxgb4vf_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *coalesce)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return set_rxq_intr_params(adapter,
				   &adapter->sge.ethrxq[pi->first_qset].rspq,
				   coalesce->rx_coalesce_usecs,
				   coalesce->rx_max_coalesced_frames);
}
/*
 * Report current port link pause parameter settings.
 */
static void cxgb4vf_get_pauseparam(struct net_device *dev,
				   struct ethtool_pauseparam *pauseparam)
{
	struct port_info *pi = netdev_priv(dev);

	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0;
	pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0;
}
/*
 * Return whether RX Checksum Offloading is currently enabled for the device.
 */
static u32 cxgb4vf_get_rx_csum(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	return (pi->rx_offload & RX_CSO) != 0;
}

/*
 * Turn RX Checksum Offloading on or off for the device.
 */
static int cxgb4vf_set_rx_csum(struct net_device *dev, u32 csum)
{
	struct port_info *pi = netdev_priv(dev);

	if (csum)
		pi->rx_offload |= RX_CSO;
	else
		pi->rx_offload &= ~RX_CSO;
	return 0;
}
/*
 * Identify the port by blinking the port's LED.
 */
static int cxgb4vf_phys_id(struct net_device *dev, u32 id)
{
	struct port_info *pi = netdev_priv(dev);

	return t4vf_identify_port(pi->adapter, pi->viid, 5);
}
/*
 * Port stats maintained per queue of the port.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 lro_pkts;
	u64 lro_merged;
};

/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
	/*
	 * These must match the layout of the t4vf_port_stats structure.
	 */
	"TxBroadcastBytes ",
	"TxBroadcastFrames ",
	"TxMulticastBytes ",
	"TxMulticastFrames ",
	/* ... */
	"RxBroadcastBytes ",
	"RxBroadcastFrames ",
	"RxMulticastBytes ",
	"RxMulticastFrames ",
	/* ... */

	/*
	 * These are accumulated per-queue statistics and must match the
	 * order of the fields in the queue_port_stats structure.
	 */
	/* ... */
};
/*
 * Return the number of statistics in the specified statistics set.
 */
static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
/*
 * Return the strings for the specified statistics set.
 */
static void cxgb4vf_get_strings(struct net_device *dev,
				u32 sset,
				u8 *data)
{
	switch (sset) {
	case ETH_SS_STATS:
		memcpy(data, stats_strings, sizeof(stats_strings));
		break;
	}
}
/*
 * Small utility routine to accumulate queue statistics across the queues of
 * a "port" ...
 */
static void collect_sge_port_stats(const struct adapter *adapter,
				   const struct port_info *pi,
				   struct queue_port_stats *stats)
{
	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
	int qs;

	memset(stats, 0, sizeof(*stats));
	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
		stats->tso += txq->tso;
		stats->tx_csum += txq->tx_cso;
		stats->rx_csum += rxq->stats.rx_cso;
		stats->vlan_ex += rxq->stats.vlan_ex;
		stats->vlan_ins += txq->vlan_ins;
		stats->lro_pkts += rxq->stats.lro_pkts;
		stats->lro_merged += rxq->stats.lro_merged;
	}
}
/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats,
				      u64 *data)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	int err = t4vf_get_port_stats(adapter, pi->pidx,
				      (struct t4vf_port_stats *)data);
	if (err)
		memset(data, 0, sizeof(struct t4vf_port_stats));

	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}
/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
	return T4VF_REGMAP_SIZE;
}
/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
			   unsigned int start, unsigned int end)
{
	u32 *bp = regbuf + start - T4VF_REGMAP_START;

	for ( ; start <= end; start += sizeof(u32)) {
		/*
		 * Avoid reading the Mailbox Control register since that
		 * can trigger a Mailbox Ownership Arbitration cycle and
		 * interfere with communication with the firmware.
		 */
		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
			*bp++ = 0;
		else
			*bp++ = t4_read_reg(adapter, start);
	}
}
/*
 * Copy our entire register map into the provided buffer.
 */
static void cxgb4vf_get_regs(struct net_device *dev,
			     struct ethtool_regs *regs,
			     void *regbuf)
{
	struct adapter *adapter = netdev2adap(dev);

	regs->version = mk_adap_vers(adapter);

	/*
	 * Fill in register buffer with our register map.
	 */
	memset(regbuf, 0, T4VF_REGMAP_SIZE);

	reg_block_dump(adapter, regbuf,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
	reg_block_dump(adapter, regbuf,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

	reg_block_dump(adapter, regbuf,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}
/*
 * Report current Wake On LAN settings.
 */
static void cxgb4vf_get_wol(struct net_device *dev,
			    struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
/*
 * TCP Segmentation Offload flags which we support.
 */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)

/*
 * Set TCP Segmentation Offloading feature capabilities.
 */
static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
{
	if (tso)
		dev->features |= TSO_FLAGS;
	else
		dev->features &= ~TSO_FLAGS;
	return 0;
}
static struct ethtool_ops cxgb4vf_ethtool_ops = {
	.get_settings		= cxgb4vf_get_settings,
	.get_drvinfo		= cxgb4vf_get_drvinfo,
	.get_msglevel		= cxgb4vf_get_msglevel,
	.set_msglevel		= cxgb4vf_set_msglevel,
	.get_ringparam		= cxgb4vf_get_ringparam,
	.set_ringparam		= cxgb4vf_set_ringparam,
	.get_coalesce		= cxgb4vf_get_coalesce,
	.set_coalesce		= cxgb4vf_set_coalesce,
	.get_pauseparam		= cxgb4vf_get_pauseparam,
	.get_rx_csum		= cxgb4vf_get_rx_csum,
	.set_rx_csum		= cxgb4vf_set_rx_csum,
	.set_tx_csum		= ethtool_op_set_tx_ipv6_csum,
	.set_sg			= ethtool_op_set_sg,
	.get_link		= ethtool_op_get_link,
	.get_strings		= cxgb4vf_get_strings,
	.phys_id		= cxgb4vf_phys_id,
	.get_sset_count		= cxgb4vf_get_sset_count,
	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
	.get_regs_len		= cxgb4vf_get_regs_len,
	.get_regs		= cxgb4vf_get_regs,
	.get_wol		= cxgb4vf_get_wol,
	.set_tso		= cxgb4vf_set_tso,
};
/*
 * /sys/kernel/debug/cxgb4vf support code and data.
 * ================================================
 */

/*
 * Show SGE Queue Set information.  We display QPL Queue Sets per line.
 */
#define QPL	4

static int sge_qinfo_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

#define S3(fmt_spec, s, v) \
	do { \
		seq_printf(seq, "%-12s", s); \
		for (qs = 0; qs < n; ++qs) \
			seq_printf(seq, " %16" fmt_spec, v); \
		seq_putc(seq, '\n'); \
	} while (0)
#define S(s, v)		S3("s", s, v)
#define T(s, v)		S3("u", s, txq[qs].v)
#define R(s, v)		S3("u", s, rxq[qs].v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		S3("d", "Port:",
		   (rxq[qs].rspq.netdev
		    ? ((struct port_info *)
		       netdev_priv(rxq[qs].rspq.netdev))->port_id
		    : -1));
		T("TxQ ID:", q.abs_id);
		T("TxQ size:", q.size);
		T("TxQ inuse:", q.in_use);
		T("TxQ PIdx:", q.pidx);
		T("TxQ CIdx:", q.cidx);
		R("RspQ ID:", rspq.abs_id);
		R("RspQ size:", rspq.size);
		R("RspQE size:", rspq.iqe_len);
		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
		S3("u", "Intr pktcnt:",
		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
		R("RspQ CIdx:", rspq.cidx);
		R("RspQ Gen:", rspq.gen);
		R("FL ID:", fl.abs_id);
		R("FL size:", fl.size - MIN_FL_RESID);
		R("FL avail:", fl.avail);
		R("FL PIdx:", fl.pidx);
		R("FL CIdx:", fl.cidx);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, evtq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[evtq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
			   qtimer_val(adapter, intrq));
		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
			   adapter->sge.counter_val[intrq->pktcnt_idx]);
		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
	}

#undef R
#undef T
#undef S
#undef S3

	return 0;
}
/*
 * Return the number of "entries" in our "file".  We group the multi-Queue
 * sections with QPL Queue Sets per "entry".  The sections of the output are:
 *
 *     Ethernet RX/TX Queue Sets
 *     Firmware Event Queue
 *     Forwarded Interrupt Queue (if in MSI mode)
 */
static int sge_queue_entries(const struct adapter *adapter)
{
	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
	       ((adapter->flags & USING_MSI) != 0);
}
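/*
 * For example, with QPL == 4 Queue Sets per row: an adapter with 10 Ethernet
 * queue sets running in MSI mode has DIV_ROUND_UP(10, 4) = 3 Ethernet
 * entries, plus 1 for the firmware event queue, plus 1 for the forwarded
 * interrupt queue -- 5 entries in all.
 */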
static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
{
	int entries = sge_queue_entries(seq->private);

	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static void sge_queue_stop(struct seq_file *seq, void *v)
{
}

static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int entries = sge_queue_entries(seq->private);

	++*pos;
	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static const struct seq_operations sge_qinfo_seq_ops = {
	.start = sge_queue_start,
	.next  = sge_queue_next,
	.stop  = sge_queue_stop,
	.show  = sge_qinfo_show
};

static int sge_qinfo_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &sge_qinfo_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations sge_qinfo_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = sge_qinfo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
/*
 * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
 */
static int sge_qstats_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

#define S3(fmt, s, v) \
	do { \
		seq_printf(seq, "%-16s", s); \
		for (qs = 0; qs < n; ++qs) \
			seq_printf(seq, " %8" fmt, v); \
		seq_putc(seq, '\n'); \
	} while (0)
#define S(s, v)		S3("s", s, v)

#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
#define T(s, v)		T3("lu", s, v)

#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
#define R(s, v)		R3("lu", s, v)

	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
		R("RxPackets:", stats.pkts);
		R("RxCSO:", stats.rx_cso);
		R("VLANxtract:", stats.vlan_ex);
		R("LROmerged:", stats.lro_merged);
		R("LROpackets:", stats.lro_pkts);
		R("RxDrops:", stats.rx_drops);
		T("TSO:", tso);
		T("TxCSO:", tx_cso);
		T("VLANins:", vlan_ins);
		T("TxQFull:", q.stops);
		T("TxQRestarts:", q.restarts);
		T("TxMapErr:", mapping_err);
		R("FLAllocErr:", fl.alloc_failed);
		R("FLLrgAlcErr:", fl.large_alloc_failed);
		R("FLStarving:", fl.starving);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   evtq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   intrq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
	}

#undef R
#undef T
#undef S
#undef R3
#undef T3
#undef S3

	return 0;
}
/*
 * Return the number of "entries" in our "file".  We group the multi-Queue
 * sections with QPL Queue Sets per "entry".  The sections of the output are:
 *
 *     Ethernet RX/TX Queue Sets
 *     Firmware Event Queue
 *     Forwarded Interrupt Queue (if in MSI mode)
 */
static int sge_qstats_entries(const struct adapter *adapter)
{
	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
	       ((adapter->flags & USING_MSI) != 0);
}
static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
{
	int entries = sge_qstats_entries(seq->private);

	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static void sge_qstats_stop(struct seq_file *seq, void *v)
{
}

static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int entries = sge_qstats_entries(seq->private);

	++*pos;
	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static const struct seq_operations sge_qstats_seq_ops = {
	.start = sge_qstats_start,
	.next  = sge_qstats_next,
	.stop  = sge_qstats_stop,
	.show  = sge_qstats_show
};

static int sge_qstats_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &sge_qstats_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations sge_qstats_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = sge_qstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
/*
 * Show PCI-E SR-IOV Virtual Function Resource Limits.
 */
static int resources_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct vf_resources *vfres = &adapter->params.vfres;

#define S(desc, fmt, var) \
	seq_printf(seq, "%-60s " fmt "\n", \
		   desc " (" #var "):", vfres->var)

	S("Virtual Interfaces", "%d", nvi);
	S("Egress Queues", "%d", neq);
	S("Ethernet Control", "%d", nethctrl);
	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
	S("Ingress Queues", "%d", niq);
	S("Traffic Class", "%d", tc);
	S("Port Access Rights Mask", "%#x", pmask);
	S("MAC Address Filters", "%d", nexactf);
	S("Firmware Command Read Capabilities", "%#x", r_caps);
	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);

#undef S

	return 0;
}
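/*
 * With the S() macro above, S("Virtual Interfaces", "%d", nvi) expands to
 * seq_printf(seq, "%-60s %d\n", "Virtual Interfaces (nvi):", vfres->nvi),
 * i.e. the field name is stringized and appended to the description.
 */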
static int resources_open(struct inode *inode, struct file *file)
{
	return single_open(file, resources_show, inode->i_private);
}

static const struct file_operations resources_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = resources_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
/*
 * Show Virtual Interfaces.
 */
static int interfaces_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Interface  Port   VIID\n");
	} else {
		struct adapter *adapter = seq->private;
		int pidx = (uintptr_t)v - 2;
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);

		seq_printf(seq, "%9s  %4d  %#5x\n",
			   dev->name, pi->port_id, pi->viid);
	}
	return 0;
}

static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
{
	return pos <= adapter->params.nports
		? (void *)(uintptr_t)(pos + 1)
		: NULL;
}

static void *interfaces_start(struct seq_file *seq, loff_t *pos)
{
	return *pos
		? interfaces_get_idx(seq->private, *pos)
		: SEQ_START_TOKEN;
}

static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return interfaces_get_idx(seq->private, *pos);
}

static void interfaces_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations interfaces_seq_ops = {
	.start = interfaces_start,
	.next  = interfaces_next,
	.stop  = interfaces_stop,
	.show  = interfaces_show
};

static int interfaces_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &interfaces_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations interfaces_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = interfaces_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
/*
 * /sys/kernel/debug/cxgb4vf/ files list.
 */
struct cxgb4vf_debugfs_entry {
	const char *name;		/* name of debugfs node */
	mode_t mode;			/* file system mode */
	const struct file_operations *fops;
};

static struct cxgb4vf_debugfs_entry debugfs_files[] = {
	{ "sge_qinfo",  S_IRUGO, &sge_qinfo_debugfs_fops },
	{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
	{ "resources",  S_IRUGO, &resources_proc_fops },
	{ "interfaces", S_IRUGO, &interfaces_proc_fops },
};
/*
 * Module and device initialization and cleanup code.
 * ==================================================
 */

/*
 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
 * directory (debugfs_root) has already been set up.
 */
static int __devinit setup_debugfs(struct adapter *adapter)
{
	int i;

	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Debugfs support is best effort.
	 */
	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
		(void)debugfs_create_file(debugfs_files[i].name,
				  debugfs_files[i].mode,
				  adapter->debugfs_root,
				  (void *)adapter,
				  debugfs_files[i].fops);

	return 0;
}

/*
 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
 * it to our caller to tear down the directory (debugfs_root).
 */
static void cleanup_debugfs(struct adapter *adapter)
{
	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Unlike our sister routine cleanup_proc(), we don't need to remove
	 * individual entries because a call will be made to
	 * debugfs_remove_recursive().  We just need to clean up any ancillary
	 * persistent state.
	 */
	/* nothing to do */
}
2076 * Perform early "adapter" initialization. This is where we discover what
2077 * adapter parameters we're going to be using and initialize basic adapter
2080 static int __devinit
adap_init0(struct adapter
*adapter
)
2082 struct vf_resources
*vfres
= &adapter
->params
.vfres
;
2083 struct sge_params
*sge_params
= &adapter
->params
.sge
;
2084 struct sge
*s
= &adapter
->sge
;
2085 unsigned int ethqsets
;
2089 * Wait for the device to become ready before proceeding ...
2091 err
= t4vf_wait_dev_ready(adapter
);
2093 dev_err(adapter
->pdev_dev
, "device didn't become ready:"
2099 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2100 * 2.6.31 and later we can't call pci_reset_function() in order to
2101 * issue an FLR because of a self- deadlock on the device semaphore.
2102 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2103 * cases where they're needed -- for instance, some versions of KVM
2104 * fail to reset "Assigned Devices" when the VM reboots. Therefore we
2105 * use the firmware based reset in order to reset any per function
2108 err
= t4vf_fw_reset(adapter
);
2110 dev_err(adapter
->pdev_dev
, "FW reset failed: err=%d\n", err
);
2115 * Grab basic operational parameters. These will predominantly have
2116 * been set up by the Physical Function Driver or will be hard coded
2117 * into the adapter. We just have to live with them ... Note that
2118 * we _must_ get our VPD parameters before our SGE parameters because
2119 * we need to know the adapter's core clock from the VPD in order to
2120 * properly decode the SGE Timer Values.
2122 err
= t4vf_get_dev_params(adapter
);
2124 dev_err(adapter
->pdev_dev
, "unable to retrieve adapter"
2125 " device parameters: err=%d\n", err
);
2128 err
= t4vf_get_vpd_params(adapter
);
2130 dev_err(adapter
->pdev_dev
, "unable to retrieve adapter"
2131 " VPD parameters: err=%d\n", err
);
2134 err
= t4vf_get_sge_params(adapter
);
2136 dev_err(adapter
->pdev_dev
, "unable to retrieve adapter"
2137 " SGE parameters: err=%d\n", err
);
2140 err
= t4vf_get_rss_glb_config(adapter
);
2142 dev_err(adapter
->pdev_dev
, "unable to retrieve adapter"
2143 " RSS parameters: err=%d\n", err
);
2146 if (adapter
->params
.rss
.mode
!=
2147 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL
) {
2148 dev_err(adapter
->pdev_dev
, "unable to operate with global RSS"
2149 " mode %d\n", adapter
->params
.rss
.mode
);
2152 err
= t4vf_sge_init(adapter
);
2154 dev_err(adapter
->pdev_dev
, "unable to use adapter parameters:"
2160 * Retrieve our RX interrupt holdoff timer values and counter
2161 * threshold values from the SGE parameters.
2163 s
->timer_val
[0] = core_ticks_to_us(adapter
,
2164 TIMERVALUE0_GET(sge_params
->sge_timer_value_0_and_1
));
2165 s
->timer_val
[1] = core_ticks_to_us(adapter
,
2166 TIMERVALUE1_GET(sge_params
->sge_timer_value_0_and_1
));
2167 s
->timer_val
[2] = core_ticks_to_us(adapter
,
2168 TIMERVALUE0_GET(sge_params
->sge_timer_value_2_and_3
));
2169 s
->timer_val
[3] = core_ticks_to_us(adapter
,
2170 TIMERVALUE1_GET(sge_params
->sge_timer_value_2_and_3
));
2171 s
->timer_val
[4] = core_ticks_to_us(adapter
,
2172 TIMERVALUE0_GET(sge_params
->sge_timer_value_4_and_5
));
2173 s
->timer_val
[5] = core_ticks_to_us(adapter
,
2174 TIMERVALUE1_GET(sge_params
->sge_timer_value_4_and_5
));
2177 THRESHOLD_0_GET(sge_params
->sge_ingress_rx_threshold
);
2179 THRESHOLD_1_GET(sge_params
->sge_ingress_rx_threshold
);
2181 THRESHOLD_2_GET(sge_params
->sge_ingress_rx_threshold
);
2183 THRESHOLD_3_GET(sge_params
->sge_ingress_rx_threshold
);
2186 * Grab our Virtual Interface resource allocation, extract the
2187 * features that we're interested in and do a bit of sanity testing on
2190 err
= t4vf_get_vfres(adapter
);
2192 dev_err(adapter
->pdev_dev
, "unable to get virtual interface"
2193 " resources: err=%d\n", err
);
2198 * The number of "ports" which we support is equal to the number of
2199 * Virtual Interfaces with which we've been provisioned.
2201 adapter
->params
.nports
= vfres
->nvi
;
2202 if (adapter
->params
.nports
> MAX_NPORTS
) {
2203 dev_warn(adapter
->pdev_dev
, "only using %d of %d allowed"
2204 " virtual interfaces\n", MAX_NPORTS
,
2205 adapter
->params
.nports
);
2206 adapter
->params
.nports
= MAX_NPORTS
;

	/*
	 * We need to reserve a number of the ingress queues with Free List
	 * and Interrupt capabilities for special interrupt purposes (like
	 * asynchronous firmware messages, or forwarded interrupts if we're
	 * using MSI).  The rest of the FL/Intr-capable ingress queues will be
	 * matched up one-for-one with Ethernet/Control egress queues in order
	 * to form "Queue Sets" which will be apportioned between the "ports".
	 * For each Queue Set, we'll need the ability to allocate two Egress
	 * Contexts -- one for the Ingress Queue Free List and one for the TX
	 * Ethernet Queue.
	 */
	ethqsets = vfres->niqflint - INGQ_EXTRAS;
	if (vfres->nethctrl != ethqsets) {
		dev_warn(adapter->pdev_dev, "unequal number of [available]"
			 " ingress/egress queues (%d/%d); using minimum for"
			 " number of Queue Sets\n", ethqsets, vfres->nethctrl);
		ethqsets = min(vfres->nethctrl, ethqsets);
	}
	if (vfres->neq < ethqsets*2) {
		dev_warn(adapter->pdev_dev, "Not enough Egress Contexts (%d)"
			 " to support Queue Sets (%d); reducing allowed Queue"
			 " Sets\n", vfres->neq, ethqsets);
		ethqsets = vfres->neq/2;
	}
	if (ethqsets > MAX_ETH_QSETS) {
		dev_warn(adapter->pdev_dev, "only using %d of %d allowed Queue"
			 " Sets\n", MAX_ETH_QSETS, adapter->sge.max_ethqsets);
		ethqsets = MAX_ETH_QSETS;
	}
	if (vfres->niq != 0 || vfres->neq > ethqsets*2) {
		dev_warn(adapter->pdev_dev, "unused resources niq/neq (%d/%d)"
			 " ignored\n", vfres->niq, vfres->neq - ethqsets*2);
	}
	adapter->sge.max_ethqsets = ethqsets;
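
	/*
	 * To make the clamping above concrete with invented numbers: a VF
	 * provisioned with niqflint = 10 and INGQ_EXTRAS = 2 starts with
	 * ethqsets = 8; nethctrl = 6 then drops that to min(6, 8) = 6; and
	 * neq = 10 (less than 2*6) finally leaves 10/2 = 5 Queue Sets.
	 */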

	/*
	 * Check for various parameter sanity issues.  Most checks simply
	 * result in us using fewer resources than our provisioning but we
	 * do need at least one "port" with which to work ...
	 */
	if (adapter->sge.max_ethqsets < adapter->params.nports) {
		dev_warn(adapter->pdev_dev, "only using %d of %d available"
			 " virtual interfaces (too few Queue Sets)\n",
			 adapter->sge.max_ethqsets, adapter->params.nports);
		adapter->params.nports = adapter->sge.max_ethqsets;
	}
	if (adapter->params.nports == 0) {
		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
			"usable!\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Initialize the basic parameters of a response queue: interrupt holdoff
 * timer/packet-count thresholds, queue size and entry size.
 */
static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
			     u8 pkt_cnt_idx, unsigned int size,
			     unsigned int iqe_size)
{
	rspq->intr_params = (QINTR_TIMER_IDX(timer_idx) |
			     (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0));
	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
			    ? pkt_cnt_idx
			    : 0);
	rspq->iqe_len = iqe_size;
	rspq->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings can
 * be modified by the admin via ethtool and cxgbtool prior to the adapter
 * being brought up for the first time.
 */
static void __devinit cfg_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int q10g, n10g, qidx, pidx, qs;
	size_t iqe_size;

	/*
	 * We should not be called till we know how many Queue Sets we can
	 * support.  In particular, this means that we need to know what kind
	 * of interrupts we'll be using ...
	 */
	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);

	/*
	 * Count the number of 10GbE Virtual Interfaces that we have.
	 */
	n10g = 0;
	for_each_port(adapter, pidx)
		n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g == 0)
		q10g = 0;
	else {
		int n1g = (adapter->params.nports - n10g);
		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
		if (q10g > num_online_cpus())
			q10g = num_online_cpus();
	}

	/*
	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
	 * The layout will be established in setup_sge_queues() when the
	 * adapter is brought up for the first time.
	 */
	qidx = 0;
	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
	s->ethqsets = qidx;
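
	/*
	 * A worked example with invented numbers: given one 10G port, one 1G
	 * port, max_ethqsets = 8 and 4 online CPUs, q10g = (8 - 1)/1 = 7 is
	 * clamped to 4, so the 10G port gets Queue Sets 0-3, the 1G port
	 * gets Queue Set 4, and s->ethqsets ends up as 5.
	 */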

	/*
	 * The Ingress Queue Entry Size for our various Response Queues needs
	 * to be big enough to accommodate the largest message we can receive
	 * from the chip/firmware; which is 64 bytes ...
	 */
	iqe_size = 64;

	/*
	 * Set up default Queue Set parameters ... Start off with the
	 * shortest interrupt holdoff timer.
	 */
	for (qs = 0; qs < s->max_ethqsets; qs++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
		struct sge_eth_txq *txq = &s->ethtxq[qs];

		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
		rxq->fl.size = 72;
		txq->q.size = 1024;
	}

	/*
	 * The firmware event queue is used for link state changes and
	 * notifications of TX DMA completions.
	 */
	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);

	/*
	 * The forwarded interrupt queue is used when we're in MSI interrupt
	 * mode.  In this mode all interrupts associated with RX queues will
	 * be forwarded to a single queue which we'll associate with our MSI
	 * interrupt vector.  The messages dropped in the forwarded interrupt
	 * queue will indicate which ingress queue needs servicing ...  This
	 * queue needs to be large enough to accommodate all of the ingress
	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
	 * from equalling the CIDX if every ingress queue has an outstanding
	 * interrupt).  The queue doesn't need to be any larger because no
	 * ingress queue will ever have more than one outstanding interrupt at
	 * any time ...
	 */
	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
		  iqe_size);
}
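
/*
 * A note on the forwarded interrupt queue sizing above: if MSIX_ENTRIES
 * were, say, 32, the queue would hold 33 entries.  Since no ingress queue
 * ever has more than one interrupt message outstanding, that single entry
 * of slack is enough to keep the producer index from ever catching up with
 * the consumer index.
 */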

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void __devinit reduce_ethqs(struct adapter *adapter, int n)
{
	int i;
	struct port_info *pi;

	/*
	 * While we have too many active Ethernet Queue Sets, iterate across
	 * the "ports" and reduce their individual Queue Set allocations.
	 */
	BUG_ON(n < adapter->params.nports);
	while (n < adapter->sge.ethqsets)
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adapter->sge.ethqsets--;
				if (adapter->sge.ethqsets <= n)
					break;
			}
		}

	/*
	 * Reassign the starting Queue Sets for each of the "ports" ...
	 */
	n = 0;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
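
/*
 * For instance (invented numbers): with two ports holding 4 and 1 Queue
 * Sets respectively, reduce_ethqs(adapter, 3) trims the first port to 2
 * Queue Sets and then renumbers so the ports' first_qset values become 0
 * and 2.
 */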

/*
 * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
 * need.  Minimally we need one for every Virtual Interface plus those needed
 * for our "extras".  Note that this process may lower the maximum number of
 * allowed Queue Sets ...
 */
static int __devinit enable_msix(struct adapter *adapter)
{
	int i, err, want, need;
	struct msix_entry entries[MSIX_ENTRIES];
	struct sge *s = &adapter->sge;

	for (i = 0; i < MSIX_ENTRIES; ++i)
		entries[i].entry = i;

	/*
	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
	 * plus those needed for our "extras" (for example, the firmware
	 * message queue).  We _need_ at least one "Queue Set" per Virtual
	 * Interface plus those needed for our "extras".  So now we get to see
	 * if the song is right ...
	 */
	want = s->max_ethqsets + MSIX_EXTRAS;
	need = adapter->params.nports + MSIX_EXTRAS;
	while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
		want = err;

	if (err == 0) {
		int nqsets = want - MSIX_EXTRAS;

		if (nqsets < s->max_ethqsets) {
			dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
				 " for %d Queue Sets\n", nqsets);
			s->max_ethqsets = nqsets;
			if (nqsets < s->ethqsets)
				reduce_ethqs(adapter, nqsets);
		}
		for (i = 0; i < want; ++i)
			adapter->msix_info[i].vec = entries[i].vector;
	} else if (err > 0) {
		pci_disable_msix(adapter->pdev);
		dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
			 " not using MSI-X\n", err);
	}
	return err;
}
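
/*
 * To illustrate the negotiation above with invented numbers: with
 * max_ethqsets = 8, two ports and MSIX_EXTRAS = 2, we first ask for
 * want = 10 vectors and will settle for anything down to need = 4.  If
 * pci_enable_msix() reports that only 7 vectors are available, we retry
 * with want = 7 and, on success, shrink max_ethqsets to 7 - 2 = 5,
 * calling reduce_ethqs() to rebalance the ports.
 */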

#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops cxgb4vf_netdev_ops = {
	.ndo_open		= cxgb4vf_open,
	.ndo_stop		= cxgb4vf_stop,
	.ndo_start_xmit		= t4vf_eth_xmit,
	.ndo_get_stats		= cxgb4vf_get_stats,
	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
	.ndo_change_mtu		= cxgb4vf_change_mtu,
	.ndo_vlan_rx_register	= cxgb4vf_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb4vf_poll_controller,
#endif
};
#endif
2475 * "Probe" a device: initialize a device and construct all kernel and driver
2476 * state needed to manage the device. This routine is called "init_one" in
2479 static int __devinit
cxgb4vf_pci_probe(struct pci_dev
*pdev
,
2480 const struct pci_device_id
*ent
)
2482 static int version_printed
;
2487 struct adapter
*adapter
;
2488 struct port_info
*pi
;
2489 struct net_device
*netdev
;
2492 * Print our driver banner the first time we're called to initialize a
2495 if (version_printed
== 0) {
2496 printk(KERN_INFO
"%s - version %s\n", DRV_DESC
, DRV_VERSION
);
2497 version_printed
= 1;
2501 * Initialize generic PCI device state.
2503 err
= pci_enable_device(pdev
);
2505 dev_err(&pdev
->dev
, "cannot enable PCI device\n");
2510 * Reserve PCI resources for the device. If we can't get them some
2511 * other driver may have already claimed the device ...
2513 err
= pci_request_regions(pdev
, KBUILD_MODNAME
);
2515 dev_err(&pdev
->dev
, "cannot obtain PCI resources\n");
2516 goto err_disable_device
;
2520 * Set up our DMA mask: try for 64-bit address masking first and
2521 * fall back to 32-bit if we can't get 64 bits ...
2523 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
2525 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
2527 dev_err(&pdev
->dev
, "unable to obtain 64-bit DMA for"
2528 " coherent allocations\n");
2529 goto err_release_regions
;
2533 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
2535 dev_err(&pdev
->dev
, "no usable DMA configuration\n");
2536 goto err_release_regions
;
2542 * Enable bus mastering for the device ...
2544 pci_set_master(pdev
);

	/*
	 * Allocate our adapter data structure and attach it to the device.
	 */
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto err_release_regions;
	}
	pci_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;

	/*
	 * Initialize SMP data synchronization resources.
	 */
	spin_lock_init(&adapter->stats_lock);

	/*
	 * Map our I/O registers in BAR0.
	 */
	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto err_free_adapter;
	}

	/*
	 * Initialize adapter level features.
	 */
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	err = adap_init0(adapter);
	if (err)
		goto err_unmap_bar;

	/*
	 * Allocate our "adapter ports" and stitch everything together.
	 */
	pmask = adapter->params.vfres.pmask;
	for_each_port(adapter, pidx) {
		int port_id, viid;

		/*
		 * We simplistically allocate our virtual interfaces
		 * sequentially across the port numbers to which we have
		 * access rights.  This should be configurable in some manner
		 * ...
		 */
		if (pmask == 0)
			break;
		port_id = ffs(pmask) - 1;
		pmask &= ~(1 << port_id);
		viid = t4vf_alloc_vi(adapter, port_id);
		if (viid < 0) {
			dev_err(&pdev->dev, "cannot allocate VI for port %d:"
				" err=%d\n", port_id, viid);
			err = viid;
			goto err_free_dev;
		}

		/*
		 * Allocate our network device and stitch things together.
		 */
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_PORT_QSETS);
		if (netdev == NULL) {
			dev_err(&pdev->dev, "cannot allocate netdev for"
				" port %d\n", port_id);
			t4vf_free_vi(adapter, viid);
			err = -ENOMEM;
			goto err_free_dev;
		}
		adapter->port[pidx] = netdev;
		SET_NETDEV_DEV(netdev, &pdev->dev);
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->pidx = pidx;
		pi->port_id = port_id;
		pi->viid = viid;

		/*
		 * Initialize the starting state of our "port" and register
		 * it.
		 */
		pi->xact_addr_filt = -1;
		pi->rx_offload = RX_CSO;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;

		netdev->features = (NETIF_F_SG | TSO_FLAGS |
				    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
				    NETIF_F_GRO);
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features =
			(netdev->features &
			 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX));

#ifdef HAVE_NET_DEVICE_OPS
		netdev->netdev_ops = &cxgb4vf_netdev_ops;
#else
		netdev->vlan_rx_register = cxgb4vf_vlan_rx_register;
		netdev->open = cxgb4vf_open;
		netdev->stop = cxgb4vf_stop;
		netdev->hard_start_xmit = t4vf_eth_xmit;
		netdev->get_stats = cxgb4vf_get_stats;
		netdev->set_rx_mode = cxgb4vf_set_rxmode;
		netdev->do_ioctl = cxgb4vf_do_ioctl;
		netdev->change_mtu = cxgb4vf_change_mtu;
		netdev->set_mac_address = cxgb4vf_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb4vf_poll_controller;
#endif
#endif
		SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);

		/*
		 * Initialize the hardware/software state for the port.
		 */
		err = t4vf_port_init(adapter, pidx);
		if (err) {
			dev_err(&pdev->dev, "cannot initialize port %d\n",
				pidx);
			goto err_free_dev;
		}
	}
2676 * The "card" is now ready to go. If any errors occur during device
2677 * registration we do not fail the whole "card" but rather proceed
2678 * only with the ports we manage to register successfully. However we
2679 * must register at least one net device.
2681 for_each_port(adapter
, pidx
) {
2682 netdev
= adapter
->port
[pidx
];
2686 err
= register_netdev(netdev
);
2688 dev_warn(&pdev
->dev
, "cannot register net device %s,"
2689 " skipping\n", netdev
->name
);
2693 set_bit(pidx
, &adapter
->registered_device_map
);
2695 if (adapter
->registered_device_map
== 0) {
2696 dev_err(&pdev
->dev
, "could not register any net devices\n");

	/*
	 * Set up our debugfs entries.
	 */
	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
		adapter->debugfs_root =
			debugfs_create_dir(pci_name(pdev),
					   cxgb4vf_debugfs_root);
		if (IS_ERR_OR_NULL(adapter->debugfs_root))
			dev_warn(&pdev->dev, "could not create debugfs"
				 " directory");
		else
			setup_debugfs(adapter);
	}

	/*
	 * See what interrupts we'll be using.  If we've been configured to
	 * use MSI-X interrupts, try to enable them but fall back to using
	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
	 * get MSI interrupts we bail with the error.
	 */
	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else {
		err = pci_enable_msi(pdev);
		if (err) {
			dev_err(&pdev->dev, "Unable to allocate %s interrupts;"
				" err=%d\n",
				msi == MSI_MSIX ? "MSI-X or MSI" : "MSI", err);
			goto err_free_debugfs;
		}
		adapter->flags |= USING_MSI;
	}

	/*
	 * Now that we know how many "ports" we have and what their types are,
	 * and how many Queue Sets we can support, we can configure our queue
	 * resources.
	 */
	cfg_queues(adapter);

	/*
	 * Print a short notice on the existence and configuration of the new
	 * VF network device ...
	 */
	for_each_port(adapter, pidx) {
		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
			 adapter->port[pidx]->name,
			 (adapter->flags & USING_MSIX) ? "MSI-X" :
			 (adapter->flags & USING_MSI)  ? "MSI" : "");
	}

	/*
	 * Return success!
	 */
	return 0;

	/*
	 * Error recovery and exit code.  Unwind state that's been created
	 * so far and return the error.
	 */

err_free_debugfs:
	if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
		cleanup_debugfs(adapter);
		debugfs_remove_recursive(adapter->debugfs_root);
	}

err_free_dev:
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;
		pi = netdev_priv(netdev);
		t4vf_free_vi(adapter, pi->viid);
		if (test_bit(pidx, &adapter->registered_device_map))
			unregister_netdev(netdev);
		free_netdev(netdev);
	}

err_unmap_bar:
	iounmap(adapter->regs);

err_free_adapter:
	kfree(adapter);
	pci_set_drvdata(pdev, NULL);

err_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	pci_clear_master(pdev);

err_disable_device:
	pci_disable_device(pdev);

	return err;
}
2798 * "Remove" a device: tear down all kernel and driver state created in the
2799 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
2800 * that this is called "remove_one" in the PF Driver.)
2802 static void __devexit
cxgb4vf_pci_remove(struct pci_dev
*pdev
)
2804 struct adapter
*adapter
= pci_get_drvdata(pdev
);
2807 * Tear down driver state associated with device.
2813 * Stop all of our activity. Unregister network port,
2814 * disable interrupts, etc.
2816 for_each_port(adapter
, pidx
)
2817 if (test_bit(pidx
, &adapter
->registered_device_map
))
2818 unregister_netdev(adapter
->port
[pidx
]);
2819 t4vf_sge_stop(adapter
);
2820 if (adapter
->flags
& USING_MSIX
) {
2821 pci_disable_msix(adapter
->pdev
);
2822 adapter
->flags
&= ~USING_MSIX
;
2823 } else if (adapter
->flags
& USING_MSI
) {
2824 pci_disable_msi(adapter
->pdev
);
2825 adapter
->flags
&= ~USING_MSI
;
2829 * Tear down our debugfs entries.
2831 if (!IS_ERR_OR_NULL(adapter
->debugfs_root
)) {
2832 cleanup_debugfs(adapter
);
2833 debugfs_remove_recursive(adapter
->debugfs_root
);
2837 * Free all of the various resources which we've acquired ...
2839 t4vf_free_sge_resources(adapter
);
2840 for_each_port(adapter
, pidx
) {
2841 struct net_device
*netdev
= adapter
->port
[pidx
];
2842 struct port_info
*pi
;
2847 pi
= netdev_priv(netdev
);
2848 t4vf_free_vi(adapter
, pi
->viid
);
2849 free_netdev(netdev
);
2851 iounmap(adapter
->regs
);
2853 pci_set_drvdata(pdev
, NULL
);
2857 * Disable the device and release its PCI resources.
2859 pci_disable_device(pdev
);
2860 pci_clear_master(pdev
);
2861 pci_release_regions(pdev
);
2865 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
2868 static void __devexit
cxgb4vf_pci_shutdown(struct pci_dev
*pdev
)
2870 struct adapter
*adapter
;
2873 adapter
= pci_get_drvdata(pdev
);
2878 * Disable all Virtual Interfaces. This will shut down the
2879 * delivery of all ingress packets into the chip for these
2880 * Virtual Interfaces.
2882 for_each_port(adapter
, pidx
) {
2883 struct net_device
*netdev
;
2884 struct port_info
*pi
;
2886 if (!test_bit(pidx
, &adapter
->registered_device_map
))
2889 netdev
= adapter
->port
[pidx
];
2893 pi
= netdev_priv(netdev
);
2894 t4vf_enable_vi(adapter
, pi
->viid
, false, false);
2898 * Free up all Queues which will prevent further DMA and
2899 * Interrupts allowing various internal pathways to drain.
2901 t4vf_free_sge_resources(adapter
);

/*
 * PCI Device registration data structures.
 */
#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
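
/*
 * For example, CH_DEVICE(0x4800, 0) below expands to
 * { PCI_VENDOR_ID_CHELSIO, 0x4800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
 * i.e. match a T440-dbg VF with any subsystem vendor/device IDs.
 */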

static struct pci_device_id cxgb4vf_pci_tbl[] = {
	CH_DEVICE(0xb000, 0),	/* PE10K FPGA */
	CH_DEVICE(0x4800, 0),	/* T440-dbg */
	CH_DEVICE(0x4801, 0),	/* T420-cr */
	CH_DEVICE(0x4802, 0),	/* T422-cr */
	CH_DEVICE(0x4803, 0),	/* T440-cr */
	CH_DEVICE(0x4804, 0),	/* T420-bch */
	CH_DEVICE(0x4805, 0),	/* T440-bch */
	CH_DEVICE(0x4806, 0),	/* T460-ch */
	CH_DEVICE(0x4807, 0),	/* T420-so */
	CH_DEVICE(0x4808, 0),	/* T420-cx */
	CH_DEVICE(0x4809, 0),	/* T420-bt */
	CH_DEVICE(0x480a, 0),	/* T404-bt */
	{ 0, }
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);

static struct pci_driver cxgb4vf_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= cxgb4vf_pci_tbl,
	.probe		= cxgb4vf_pci_probe,
	.remove		= __devexit_p(cxgb4vf_pci_remove),
	.shutdown	= __devexit_p(cxgb4vf_pci_shutdown),
};

/*
 * Initialize global driver state.
 */
static int __init cxgb4vf_module_init(void)
{
	int ret;

	/*
	 * Vet our module parameters.
	 */
	if (msi != MSI_MSIX && msi != MSI_MSI) {
		printk(KERN_WARNING KBUILD_MODNAME
		       ": bad module parameter msi=%d; must be %d"
		       " (MSI-X or MSI) or %d (MSI)\n",
		       msi, MSI_MSIX, MSI_MSI);
		return -EINVAL;
	}

	/* Debugfs support is optional, just warn if this fails */
	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		printk(KERN_WARNING KBUILD_MODNAME ": could not create"
		       " debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4vf_driver);
	if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
		debugfs_remove(cxgb4vf_debugfs_root);
	return ret;
}

/*
 * Tear down global driver state.
 */
static void __exit cxgb4vf_module_exit(void)
{
	pci_unregister_driver(&cxgb4vf_driver);
	debugfs_remove(cxgb4vf_debugfs_root);
}

module_init(cxgb4vf_module_init);
module_exit(cxgb4vf_module_exit);