/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"
/*
 * Generic information about the driver.
 */
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"
/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI.  This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX        2
#define MSI_MSI         1
#define MSI_DEFAULT     MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
/*
 * Fundamental constants.
 * ======================
 */

enum {
        MAX_TXQ_ENTRIES         = 16384,
        MAX_RSPQ_ENTRIES        = 16384,
        MAX_RX_BUFFERS          = 16384,

        MIN_TXQ_ENTRIES         = 32,
        MIN_RSPQ_ENTRIES        = 128,
        MIN_FL_ENTRIES          = 16,

        /*
         * For purposes of manipulating the Free List size we need to
         * recognize that Free Lists are actually Egress Queues (the host
         * produces free buffers which the hardware consumes), Egress Queues
         * indices are all in units of Egress Context Units bytes, and free
         * list entries are 64-bit PCI DMA addresses.  And since the state of
         * the Producer Index == the Consumer Index implies an EMPTY list, we
         * always have at least one Egress Unit's worth of Free List entries
         * unused.  See sge.c for more details ...
         */
        EQ_UNIT = SGE_EQ_IDXSIZE,
        FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
        MIN_FL_RESID = FL_PER_EQ_UNIT,
};
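/*
 * Worked example (assuming SGE_EQ_IDXSIZE is 64, its usual value): an Egress
 * Queue Unit is then 64 bytes and a Free List entry is an 8-byte DMA address,
 * so FL_PER_EQ_UNIT = 64 / 8 = 8 and MIN_FL_RESID keeps 8 Free List entries
 * permanently unused so that a full ring is never mistaken for an empty one
 * (Producer Index == Consumer Index).
 */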
/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;
131 * OS "Callback" functions.
132 * ========================
/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
        struct net_device *dev = adapter->port[pidx];

        /*
         * If the port is disabled or the current recorded "link up"
         * status matches the new status, just return.
         */
        if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
                return;

        /*
         * Tell the OS that the link status has changed and print a short
         * informative message on the console about the event.
         */
        if (link_ok) {
                const char *s;
                const char *fc;
                const struct port_info *pi = netdev_priv(dev);

                netif_carrier_on(dev);

                switch (pi->link_cfg.speed) {
                case 100:       s = "100Mbps";  break;
                case 1000:      s = "1Gbps";    break;
                case 10000:     s = "10Gbps";   break;
                case 25000:     s = "25Gbps";   break;
                case 40000:     s = "40Gbps";   break;
                case 100000:    s = "100Gbps";  break;
                default:        s = "unknown";  break;
                }

                switch ((int)pi->link_cfg.fc) {
                case PAUSE_RX:
                        fc = "RX";
                        break;
                case PAUSE_TX:
                        fc = "TX";
                        break;
                case PAUSE_RX | PAUSE_TX:
                        fc = "RX/TX";
                        break;
                default:
                        fc = "no";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
        } else {
                netif_carrier_off(dev);
                netdev_info(dev, "link down\n");
        }
}
/*
 * The port module type has changed on the indicated "port" (Virtual
 * Interface).
 */
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
        static const char * const mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };
        const struct net_device *dev = adapter->port[pidx];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
                         dev->name);
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
                         dev->name, mod_str[pi->mod_type]);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                dev_info(adapter->pdev_dev, "%s: unsupported optical port "
                         "module inserted\n", dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
                         "forcing TWINAX\n", dev->name);
        else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
                dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
                         dev->name);
        else
                dev_info(adapter->pdev_dev, "%s: unknown module type %d "
                         "inserted\n", dev->name, pi->mod_type);
}
static int cxgb4vf_set_addr_hash(struct port_info *pi)
{
        struct adapter *adapter = pi->adapter;
        u64 vec = 0;
        bool ucast = false;
        struct hash_mac_addr *entry;

        /* Calculate the hash vector for the updated list and program it */
        list_for_each_entry(entry, &adapter->mac_hlist, list) {
                ucast |= is_unicast_ether_addr(entry->addr);
                vec |= (1ULL << hash_mac_addr(entry->addr));
        }
        return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}
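/*
 * Illustrative example (hypothetical values): if the list holds two multicast
 * addresses whose hash_mac_addr() results are 3 and 17, the loop above
 * computes vec = (1ULL << 3) | (1ULL << 17) with ucast still false, and that
 * 64-bit vector is what gets programmed into the VI's hash filter.
 */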
/**
 *      cxgb4vf_change_mac - Update match filter for a MAC address.
 *      @pi: the port_info for the interface
 *      @viid: the Virtual Interface ID
 *      @tcam_idx: TCAM index of existing filter for old value of MAC address,
 *                 or -1
 *      @addr: the new MAC address value
 *      @persistent: whether a new MAC allocation should be persistent
 *
 *      Modifies an MPS filter and sets it to the new MAC address if
 *      @tcam_idx >= 0, or adds the MAC address to a new filter if
 *      @tcam_idx < 0.  In the latter case the address is added persistently
 *      if @persistent is %true.
 *      Addresses are programmed into the hash region if the TCAM runs out
 *      of entries.
 */
static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid,
                              int *tcam_idx, const u8 *addr, bool persistent)
{
        struct hash_mac_addr *new_entry, *entry;
        struct adapter *adapter = pi->adapter;
        int ret;

        ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent);
        /* We ran out of TCAM entries.  Try programming the hash region. */
        if (ret == -ENOMEM) {
                /* If the MAC address to be updated is in the hash addr
                 * list, update it from the list
                 */
                list_for_each_entry(entry, &adapter->mac_hlist, list) {
                        if (entry->iface_mac) {
                                ether_addr_copy(entry->addr, addr);
                                goto set_hash;
                        }
                }
                new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
                if (!new_entry)
                        return -ENOMEM;
                ether_addr_copy(new_entry->addr, addr);
                new_entry->iface_mac = true;
                list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
                ret = cxgb4vf_set_addr_hash(pi);
        } else if (ret >= 0) {
                *tcam_idx = ret;
                ret = 0;
        }

        return ret;
}
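/*
 * Usage sketch (mirrors the callers below, e.g. link_start() and
 * cxgb4vf_set_mac_addr()):
 *
 *      err = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
 *                               dev->dev_addr, true);
 *
 * A negative *tcam_idx allocates a fresh TCAM filter; on TCAM exhaustion
 * (-ENOMEM) the address falls back to the hash region as implemented above.
 */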
/*
 * Net device operations.
 * ======================
 */

/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.  Enable vlan accel.
         */
        ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
                              true);
        if (ret == 0)
                ret = cxgb4vf_change_mac(pi, pi->viid,
                                         &pi->xact_addr_filt,
                                         dev->dev_addr, true);

        /*
         * We don't need to actually "start the link" itself since the
         * firmware will do that for us when the first Virtual Interface
         * is enabled on a port.
         */
        if (ret == 0)
                ret = t4vf_enable_pi(pi->adapter, pi, true, true);

        return ret;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
        int namelen = sizeof(adapter->msix_info[0].desc) - 1;
        int pidx;

        /*
         * Firmware events.
         */
        snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
                 "%s-FWeventq", adapter->name);
        adapter->msix_info[MSIX_FW].desc[namelen] = 0;

        /*
         * Ethernet queues.
         */
        for_each_port(adapter, pidx) {
                struct net_device *dev = adapter->port[pidx];
                const struct port_info *pi = netdev_priv(dev);
                int qs, msi;

                for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
                        snprintf(adapter->msix_info[msi].desc, namelen,
                                 "%s-%d", dev->name, qs);
                        adapter->msix_info[msi].desc[namelen] = 0;
                }
        }
}
/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
        struct sge *s = &adapter->sge;
        int rxq, msi, err;

        /*
         * Firmware events.
         */
        err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
                          0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
        if (err)
                return err;

        /*
         * Ethernet queues.
         */
        msi = MSIX_IQFLINT;
        for_each_ethrxq(s, rxq) {
                err = request_irq(adapter->msix_info[msi].vec,
                                  t4vf_sge_intr_msix, 0,
                                  adapter->msix_info[msi].desc,
                                  &s->ethrxq[rxq].rspq);
                if (err)
                        goto err_free_irqs;
                msi++;
        }
        return 0;

err_free_irqs:
        while (--rxq >= 0)
                free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
        free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
        return err;
}
/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
        struct sge *s = &adapter->sge;
        int rxq, msi;

        free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
        msi = MSIX_IQFLINT;
        for_each_ethrxq(s, rxq)
                free_irq(adapter->msix_info[msi++].vec,
                         &s->ethrxq[rxq].rspq);
}
/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
        napi_enable(&rspq->napi);

        /*
         * 0-increment the Going To Sleep register to start the timer and
         * enable interrupts.
         */
        t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
                     CIDXINC_V(0) |
                     SEINTARM_V(rspq->intr_params) |
                     INGRESSQID_V(rspq->cntxt_id));
}
/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
        int rxq;
        struct sge *s = &adapter->sge;

        for_each_ethrxq(s, rxq)
                qenable(&s->ethrxq[rxq].rspq);
        qenable(&s->fw_evtq);

        /*
         * The interrupt queue doesn't use NAPI so we do the 0-increment of
         * its Going To Sleep register here to get it started.
         */
        if (adapter->flags & CXGB4VF_USING_MSI)
                t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
                             CIDXINC_V(0) |
                             SEINTARM_V(s->intrq.intr_params) |
                             INGRESSQID_V(s->intrq.cntxt_id));
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
        struct sge *s = &adapter->sge;
        int rxq;

        for_each_ethrxq(s, rxq)
                napi_disable(&s->ethrxq[rxq].rspq.napi);
        napi_disable(&s->fw_evtq.napi);
}
/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        /*
         * Extract response opcode and get pointer to CPL message body.
         */
        struct adapter *adapter = rspq->adapter;
        u8 opcode = ((const struct rss_header *)rsp)->opcode;
        void *cpl = (void *)(rsp + 1);

        switch (opcode) {
        case CPL_FW6_MSG: {
                /*
                 * We've received an asynchronous message from the firmware.
                 */
                const struct cpl_fw6_msg *fw_msg = cpl;
                if (fw_msg->type == FW6_TYPE_CMD_RPL)
                        t4vf_handle_fw_rpl(adapter, fw_msg->data);
                break;
        }

        case CPL_FW4_MSG: {
                /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
                 */
                const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
                opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        break;
                }
                cpl = (void *)p;
        }
                /* Fall through */

        case CPL_SGE_EGR_UPDATE: {
                /*
                 * We've received an Egress Queue Status Update message.  We
                 * get these, if the SGE is configured to send these when the
                 * firmware passes certain points in processing our TX
                 * Ethernet Queue or if we make an explicit request for one.
                 * We use these updates to determine when we may need to
                 * restart a TX Ethernet Queue which was stopped for lack of
                 * free TX Queue Descriptors ...
                 */
                const struct cpl_sge_egr_update *p = cpl;
                unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
                struct sge *s = &adapter->sge;
                struct sge_rspq *tq;
                struct sge_eth_txq *txq;
                unsigned int eq_idx;

                /*
                 * Perform sanity checking on the Queue ID to make sure it
                 * really refers to one of our TX Ethernet Egress Queues which
                 * is active and matches the queue's ID.  None of these error
                 * conditions should ever happen so we may want to either make
                 * them fatal and/or conditionalized under DEBUG.
                 */
                eq_idx = EQ_IDX(s, qid);
                if (unlikely(eq_idx >= MAX_EGRQ)) {
                        dev_err(adapter->pdev_dev,
                                "Egress Update QID %d out of range\n", qid);
                        break;
                }
                tq = s->egr_map[eq_idx];
                if (unlikely(tq == NULL)) {
                        dev_err(adapter->pdev_dev,
                                "Egress Update QID %d TXQ=NULL\n", qid);
                        break;
                }
                txq = container_of(tq, struct sge_eth_txq, q);
                if (unlikely(tq->abs_id != qid)) {
                        dev_err(adapter->pdev_dev,
                                "Egress Update QID %d refers to TXQ %d\n",
                                qid, tq->abs_id);
                        break;
                }

                /*
                 * Restart a stopped TX Queue which has less than half of its
                 * TX ring in use ...
                 */
                txq->q.restarts++;
                netif_tx_wake_queue(txq->txq);
                break;
        }

        default:
                dev_err(adapter->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        }

        return 0;
}
/*
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initializes them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
        struct sge *s = &adapter->sge;
        int err, pidx, msix;

        /*
         * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
         * state.
         */
        bitmap_zero(s->starving_fl, MAX_EGRQ);

        /*
         * If we're using MSI interrupt mode we need to set up a "forwarded
         * interrupt" queue which we'll set up with our MSI vector.  The rest
         * of the ingress queues will be set up to forward their interrupts to
         * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
         * the intrq's queue ID as the interrupt forwarding queue for the
         * subsequent calls ...
         */
        if (adapter->flags & CXGB4VF_USING_MSI) {
                err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
                                         adapter->port[0], 0, NULL, NULL);
                if (err)
                        goto err_free_queues;
        }

        /*
         * Allocate our ingress queue for asynchronous firmware messages.
         */
        err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
                                 MSIX_FW, NULL, fwevtq_handler);
        if (err)
                goto err_free_queues;

        /*
         * Allocate each "port"'s initial Queue Sets.  These can be changed
         * later on ... up to the point where any interface on the adapter is
         * brought up at which point lots of things get nailed down
         * permanently ...
         */
        msix = MSIX_IQFLINT;
        for_each_port(adapter, pidx) {
                struct net_device *dev = adapter->port[pidx];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
                int qs;

                for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
                        err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
                                                 dev, msix++,
                                                 &rxq->fl, t4vf_ethrx_handler);
                        if (err)
                                goto err_free_queues;

                        err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
                                             netdev_get_tx_queue(dev, qs),
                                             s->fw_evtq.cntxt_id);
                        if (err)
                                goto err_free_queues;

                        memset(&rxq->stats, 0, sizeof(rxq->stats));
                }
        }

        /*
         * Create the reverse mappings for the queues.
         */
        s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
        s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
        IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
        for_each_port(adapter, pidx) {
                struct net_device *dev = adapter->port[pidx];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
                int qs;

                for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
                        IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
                        EQ_MAP(s, txq->q.abs_id) = &txq->q;

                        /*
                         * The FW_IQ_CMD doesn't return the Absolute Queue IDs
                         * for Free Lists but since all of the Egress Queues
                         * (including Free Lists) have Relative Queue IDs
                         * which are computed as Absolute - Base Queue ID, we
                         * can synthesize the Absolute Queue IDs for the Free
                         * Lists.  This is useful for debugging purposes when
                         * we want to dump Queue Contexts via the PF Driver.
                         */
                        rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
                        EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
                }
        }
        return 0;

err_free_queues:
        t4vf_free_sge_resources(adapter);
        return err;
}
/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues.  We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface).  We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
        int pidx;

        for_each_port(adapter, pidx) {
                struct port_info *pi = adap2pinfo(adapter, pidx);
                struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
                u16 rss[MAX_PORT_QSETS];
                int qs, err;

                for (qs = 0; qs < pi->nqsets; qs++)
                        rss[qs] = rxq[qs].rspq.abs_id;

                err = t4vf_config_rss_range(adapter, pi->viid,
                                            0, pi->rss_size, rss, pi->nqsets);
                if (err)
                        return err;

                /*
                 * Perform Global RSS Mode-specific initialization.
                 */
                switch (adapter->params.rss.mode) {
                case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
                        /*
                         * If Tunnel All Lookup isn't specified in the global
                         * RSS Configuration, then we need to specify a
                         * default Ingress Queue for any ingress packets which
                         * aren't hashed.  We'll use our first ingress queue.
                         */
                        if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
                                union rss_vi_config config;

                                err = t4vf_read_rss_vi_config(adapter,
                                                              pi->viid,
                                                              &config);
                                if (err)
                                        return err;
                                config.basicvirtual.defaultq =
                                        rxq[0].rspq.abs_id;
                                err = t4vf_write_rss_vi_config(adapter,
                                                               pi->viid,
                                                               &config);
                                if (err)
                                        return err;
                        }
                        break;
                }
        }

        return 0;
}
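/*
 * Worked example (hypothetical sizes): for a port with nqsets = 4 and
 * rss_size = 64, rss[] holds the four response queues' Absolute Queue IDs and
 * t4vf_config_rss_range() lays them out round-robin across the 64 RSS table
 * slots, so hashed flows spread evenly over the four queues.
 */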
/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
        int err;

        /*
         * If this is the first time we've been called, perform basic
         * adapter setup.  Once we've done this, many of our adapter
         * parameters can no longer be changed ...
         */
        if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
                err = setup_sge_queues(adapter);
                if (err)
                        return err;
                err = setup_rss(adapter);
                if (err) {
                        t4vf_free_sge_resources(adapter);
                        return err;
                }

                if (adapter->flags & CXGB4VF_USING_MSIX)
                        name_msix_vecs(adapter);

                adapter->flags |= CXGB4VF_FULL_INIT_DONE;
        }

        /*
         * Acquire our interrupt resources.  We only support MSI-X and MSI.
         */
        BUG_ON((adapter->flags &
                (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
        if (adapter->flags & CXGB4VF_USING_MSIX)
                err = request_msix_queue_irqs(adapter);
        else
                err = request_irq(adapter->pdev->irq,
                                  t4vf_intr_handler(adapter), 0,
                                  adapter->name, adapter);
        if (err) {
                dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
                        err);
                return err;
        }

        /*
         * Enable NAPI ingress processing and return success.
         */
        enable_rx(adapter);
        t4vf_sge_start(adapter);

        return 0;
}
/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) closed.  (Note that this routine is called "cxgb_down" in the PF
 * Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
        /*
         * Free interrupt resources.
         */
        if (adapter->flags & CXGB4VF_USING_MSIX)
                free_msix_queue_irqs(adapter);
        else
                free_irq(adapter->pdev->irq, adapter);

        /*
         * Wait for NAPI handlers to finish.
         */
        quiesce_rx(adapter);
}
/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
        int err;
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        /*
         * If we don't have a connection to the firmware there's nothing we
         * can do.
         */
        if (!(adapter->flags & CXGB4VF_FW_OK))
                return -ENXIO;

        /*
         * If this is the first interface that we're opening on the "adapter",
         * bring the "adapter" up now.
         */
        if (adapter->open_device_map == 0) {
                err = adapter_up(adapter);
                if (err)
                        return err;
        }

        /* It's possible that the basic port information could have
         * changed since we first read it.
         */
        err = t4vf_update_port_info(pi);
        if (err < 0)
                goto err_unwind;

        /*
         * Note that this interface is up and start everything up ...
         */
        err = link_start(dev);
        if (err)
                goto err_unwind;

        pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);

        netif_tx_start_all_queues(dev);
        set_bit(pi->port_id, &adapter->open_device_map);
        return 0;

err_unwind:
        if (adapter->open_device_map == 0)
                adapter_down(adapter);
        return err;
}
/*
 * Shut down a net device.  This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);
        t4vf_enable_pi(adapter, pi, false, false);

        clear_bit(pi->port_id, &adapter->open_device_map);
        if (adapter->open_device_map == 0)
                adapter_down(adapter);
        return 0;
}
/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
        struct t4vf_port_stats stats;
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        struct net_device_stats *ns = &dev->stats;
        int err;

        spin_lock(&adapter->stats_lock);
        err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
        spin_unlock(&adapter->stats_lock);

        memset(ns, 0, sizeof(*ns));
        if (err)
                return ns;

        ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
                        stats.tx_ucast_bytes + stats.tx_offload_bytes);
        ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
                          stats.tx_ucast_frames + stats.tx_offload_frames);
        ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
                        stats.rx_ucast_bytes);
        ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
                          stats.rx_ucast_frames);
        ns->multicast = stats.rx_mcast_frames;
        ns->tx_errors = stats.tx_drop_frames;
        ns->rx_errors = stats.rx_err_frames;

        return ns;
}
static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adapter = pi->adapter;
        int ret;
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = false;
        bool ucast = is_unicast_ether_addr(mac_addr);
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *new_entry;

        ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
                                  NULL, ucast ? &uhash : &mhash, false);
        if (ret < 0)
                goto out;
        /* if hash != 0, then add the addr to the hash addr list
         * so that at the end we can calculate the hash for the
         * list and program it
         */
        if (uhash || mhash) {
                new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
                if (!new_entry)
                        return -ENOMEM;
                ether_addr_copy(new_entry->addr, mac_addr);
                list_add_tail(&new_entry->list, &adapter->mac_hlist);
                ret = cxgb4vf_set_addr_hash(pi);
        }
out:
        return ret < 0 ? ret : 0;
}
static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
        struct port_info *pi = netdev_priv(netdev);
        struct adapter *adapter = pi->adapter;
        int ret;
        const u8 *maclist[1] = {mac_addr};
        struct hash_mac_addr *entry, *tmp;

        /* If the MAC address to be removed is in the hash addr
         * list, delete it from the list and update hash vector
         */
        list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
                if (ether_addr_equal(entry->addr, mac_addr)) {
                        list_del(&entry->list);
                        kfree(entry);
                        return cxgb4vf_set_addr_hash(pi);
                }
        }

        ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
        return ret < 0 ? -EINVAL : 0;
}
/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        struct port_info *pi = netdev_priv(dev);

        __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
        __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
        return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
                               (dev->flags & IFF_PROMISC) != 0,
                               (dev->flags & IFF_ALLMULTI) != 0,
                               1, -1, sleep_ok);
}
/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
        /* unfortunately we can't return errors to the stack */
        set_rxmode(dev, -1, false);
}
/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
        int i, timer_idx = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
                int delta = us - s->timer_val[i];

                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        timer_idx = i;
                }
        }
        return timer_idx;
}
static int closest_thres(const struct sge *s, int thres)
{
        int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

        for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
                delta = thres - s->counter_val[i];
                if (delta < 0)
                        delta = -delta;
                if (delta < min_delta) {
                        min_delta = delta;
                        pktcnt_idx = i;
                }
        }
        return pktcnt_idx;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
                               const struct sge_rspq *rspq)
{
        unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);

        return timer_idx < SGE_NTIMERS
                ? adapter->sge.timer_val[timer_idx]
                : 0;
}
/**
 *      set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *      @adapter: the adapter
 *      @rspq: the RX response queue
 *      @us: the hold-off time in us, or 0 to disable timer
 *      @cnt: the hold-off packet count, or 0 to disable counter
 *
 *      Sets an RX response queue's interrupt hold-off time and packet count.
 *      At least one of the two needs to be enabled for the queue to generate
 *      interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
                               unsigned int us, unsigned int cnt)
{
        unsigned int timer_idx;

        /*
         * If both the interrupt holdoff timer and count are specified as
         * zero, default to a holdoff count of 1 ...
         */
        if ((us | cnt) == 0)
                cnt = 1;

        /*
         * If an interrupt holdoff count has been specified, then find the
         * closest configured holdoff count and use that.  If the response
         * queue has already been created, then update its queue context
         * parameters ...
         */
        if (cnt) {
                int err;
                u32 v, pktcnt_idx;

                pktcnt_idx = closest_thres(&adapter->sge, cnt);
                if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
                        v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                            FW_PARAMS_PARAM_X_V(
                                        FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
                            FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
                        err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
                        if (err)
                                return err;
                }
                rspq->pktcnt_idx = pktcnt_idx;
        }

        /*
         * Compute the closest holdoff timer index from the supplied holdoff
         * timer value.
         */
        timer_idx = (us == 0
                     ? SGE_TIMER_RSTRT_CNTR
                     : closest_timer(&adapter->sge, us));

        /*
         * Update the response queue's interrupt coalescing parameters and
         * return success.
         */
        rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
                             QINTR_CNT_EN_V(cnt > 0));
        return 0;
}
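/*
 * Usage sketch (illustrative values): to hold off interrupts on a port's
 * first response queue until ~100us have passed or 8 packets have arrived,
 * whichever comes first:
 *
 *      set_rxq_intr_params(adapter,
 *                          &adapter->sge.ethrxq[pi->first_qset].rspq,
 *                          100, 8);
 *
 * This is what cxgb4vf_set_coalesce() below does with the values supplied by
 * "ethtool -C".
 */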
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
        /*
         * Chip version 4, revision 0x3f (cxgb4vf).
         */
        return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
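/*
 * Example decode (illustrative): a dump version of 0xfc04 is
 * (0x3f << 10) | 4, i.e. the fixed 0x3f revision field in bits 10..15 and
 * chip version 4 (a T4 part) in bits 0..9.
 */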
/*
 * Execute the specified ioctl command.
 */
static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        int ret = 0;

        switch (cmd) {
        /*
         * The VF Driver doesn't have access to any of the other
         * common Ethernet device ioctl()'s (like reading/writing
         * PHY registers, etc.)
         */
        default:
                ret = -EOPNOTSUPP;
                break;
        }
        return ret;
}
/*
 * Change the device's MTU.
 */
static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
                              -1, -1, -1, -1, true);
        if (!ret)
                dev->mtu = new_mtu;

        return ret;
}
static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
                                              netdev_features_t features)
{
        /*
         * Since there is no support for separate rx/tx vlan accel
         * enable/disable make sure tx flag is always in same state as rx.
         */
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
                features |= NETIF_F_HW_VLAN_CTAG_TX;
        else
                features &= ~NETIF_F_HW_VLAN_CTAG_TX;

        return features;
}
static int cxgb4vf_set_features(struct net_device *dev,
                                netdev_features_t features)
{
        struct port_info *pi = netdev_priv(dev);
        netdev_features_t changed = dev->features ^ features;

        if (changed & NETIF_F_HW_VLAN_CTAG_RX)
                t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
                                features & NETIF_F_HW_VLAN_CTAG_TX, 0);

        return 0;
}
/*
 * Change the device's MAC address.
 */
static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
{
        int ret;
        struct sockaddr *addr = _addr;
        struct port_info *pi = netdev_priv(dev);

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
                                 addr->sa_data, true);
        if (ret < 0)
                return ret;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
        return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Poll all of our receive queues.  This is called outside of normal interrupt
 * context.
 */
static void cxgb4vf_poll_controller(struct net_device *dev)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        if (adapter->flags & CXGB4VF_USING_MSIX) {
                struct sge_eth_rxq *rxq;
                int nqsets;

                rxq = &adapter->sge.ethrxq[pi->first_qset];
                for (nqsets = pi->nqsets; nqsets; nqsets--) {
                        t4vf_sge_intr_msix(0, &rxq->rspq);
                        rxq++;
                }
        } else
                t4vf_intr_handler(adapter)(0, adapter);
}
#endif
/*
 * Ethtool operations.
 * ===================
 *
 * Note that we don't support any ethtool operations which change the physical
 * state of the port to which we're linked.
 */
/**
 *      from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
 *      @port_type: Firmware Port Type
 *      @mod_type: Firmware Module Type
 *
 *      Translate Firmware Port/Module type to Ethtool Port Type.
 */
static int from_fw_port_mod_type(enum fw_port_type port_type,
                                 enum fw_port_module_type mod_type)
{
        if (port_type == FW_PORT_TYPE_BT_SGMII ||
            port_type == FW_PORT_TYPE_BT_XFI ||
            port_type == FW_PORT_TYPE_BT_XAUI) {
                return PORT_TP;
        } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
                   port_type == FW_PORT_TYPE_FIBER_XAUI) {
                return PORT_FIBRE;
        } else if (port_type == FW_PORT_TYPE_SFP ||
                   port_type == FW_PORT_TYPE_QSFP_10G ||
                   port_type == FW_PORT_TYPE_QSA ||
                   port_type == FW_PORT_TYPE_QSFP ||
                   port_type == FW_PORT_TYPE_CR4_QSFP ||
                   port_type == FW_PORT_TYPE_CR_QSFP ||
                   port_type == FW_PORT_TYPE_CR2_QSFP ||
                   port_type == FW_PORT_TYPE_SFP28) {
                if (mod_type == FW_PORT_MOD_TYPE_LR ||
                    mod_type == FW_PORT_MOD_TYPE_SR ||
                    mod_type == FW_PORT_MOD_TYPE_ER ||
                    mod_type == FW_PORT_MOD_TYPE_LRM)
                        return PORT_FIBRE;
                else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
                         mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
                        return PORT_DA;
                else
                        return PORT_OTHER;
        } else if (port_type == FW_PORT_TYPE_KR4_100G ||
                   port_type == FW_PORT_TYPE_KR_SFP28 ||
                   port_type == FW_PORT_TYPE_KR_XLAUI) {
                return PORT_NONE;
        }

        return PORT_OTHER;
}
/**
 *      fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
 *      @port_type: Firmware Port Type
 *      @fw_caps: Firmware Port Capabilities
 *      @link_mode_mask: ethtool Link Mode Mask
 *
 *      Translate a Firmware Port Capabilities specification to an ethtool
 *      Link Mode Mask.
 */
static void fw_caps_to_lmm(enum fw_port_type port_type,
                           unsigned int fw_caps,
                           unsigned long *link_mode_mask)
{
        #define SET_LMM(__lmm_name) \
                __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
                          link_mode_mask)

        #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
                do { \
                        if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
                                SET_LMM(__lmm_name); \
                } while (0)

        switch (port_type) {
        case FW_PORT_TYPE_BT_SGMII:
        case FW_PORT_TYPE_BT_XFI:
        case FW_PORT_TYPE_BT_XAUI:
                SET_LMM(TP);
                FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
                break;

        case FW_PORT_TYPE_KX4:
        case FW_PORT_TYPE_KX:
                SET_LMM(Backplane);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
                break;

        case FW_PORT_TYPE_KR:
                SET_LMM(Backplane);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
                break;

        case FW_PORT_TYPE_BP_AP:
                SET_LMM(Backplane);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
                break;

        case FW_PORT_TYPE_BP4_AP:
                SET_LMM(Backplane);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
                break;

        case FW_PORT_TYPE_FIBER_XFI:
        case FW_PORT_TYPE_FIBER_XAUI:
        case FW_PORT_TYPE_SFP:
        case FW_PORT_TYPE_QSFP_10G:
        case FW_PORT_TYPE_QSA:
                SET_LMM(FIBRE);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
                break;

        case FW_PORT_TYPE_BP40_BA:
        case FW_PORT_TYPE_QSFP:
                SET_LMM(FIBRE);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
                FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
                break;

        case FW_PORT_TYPE_CR_QSFP:
        case FW_PORT_TYPE_SFP28:
                SET_LMM(FIBRE);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
                FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
                break;

        case FW_PORT_TYPE_KR_SFP28:
                SET_LMM(Backplane);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
                FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
                break;

        case FW_PORT_TYPE_KR_XLAUI:
                SET_LMM(Backplane);
                FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
                FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
                break;

        case FW_PORT_TYPE_CR2_QSFP:
                SET_LMM(FIBRE);
                FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
                break;

        case FW_PORT_TYPE_KR4_100G:
        case FW_PORT_TYPE_CR4_QSFP:
                SET_LMM(FIBRE);
                FW_CAPS_TO_LMM(SPEED_1G,  1000baseT_Full);
                FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
                FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
                FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
                FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
                FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
                break;

        default:
                break;
        }

        if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
                FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
                FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
        } else {
                SET_LMM(FEC_NONE);
        }

        FW_CAPS_TO_LMM(ANEG, Autoneg);
        FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
        FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);

        #undef FW_CAPS_TO_LMM
        #undef SET_LMM
}
static int cxgb4vf_get_link_ksettings(struct net_device *dev,
                                      struct ethtool_link_ksettings *link_ksettings)
{
        struct port_info *pi = netdev_priv(dev);
        struct ethtool_link_settings *base = &link_ksettings->base;

        /* For the nonce, the Firmware doesn't send up Port State changes
         * when the Virtual Interface attached to the Port is down.  So
         * if it's down, let's grab any changes.
         */
        if (!netif_running(dev))
                (void)t4vf_update_port_info(pi);

        ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
        ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
        ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);

        base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);

        if (pi->mdio_addr >= 0) {
                base->phy_address = pi->mdio_addr;
                base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
                                      ? ETH_MDIO_SUPPORTS_C22
                                      : ETH_MDIO_SUPPORTS_C45);
        } else {
                base->phy_address = 255;
                base->mdio_support = 0;
        }

        fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
                       link_ksettings->link_modes.supported);
        fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
                       link_ksettings->link_modes.advertising);
        fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
                       link_ksettings->link_modes.lp_advertising);

        if (netif_carrier_ok(dev)) {
                base->speed = pi->link_cfg.speed;
                base->duplex = DUPLEX_FULL;
        } else {
                base->speed = SPEED_UNKNOWN;
                base->duplex = DUPLEX_UNKNOWN;
        }

        base->autoneg = pi->link_cfg.autoneg;
        if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     supported, Autoneg);
        if (pi->link_cfg.autoneg)
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     advertising, Autoneg);

        return 0;
}
/* Translate the Firmware FEC value into the ethtool value. */
static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
{
        unsigned int eth_fec = 0;

        if (fw_fec & FW_PORT_CAP32_FEC_RS)
                eth_fec |= ETHTOOL_FEC_RS;
        if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
                eth_fec |= ETHTOOL_FEC_BASER;

        /* if nothing is set, then FEC is off */
        if (!eth_fec)
                eth_fec = ETHTOOL_FEC_OFF;

        return eth_fec;
}
/* Translate Common Code FEC value into ethtool value. */
static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
{
        unsigned int eth_fec = 0;

        if (cc_fec & FEC_AUTO)
                eth_fec |= ETHTOOL_FEC_AUTO;
        if (cc_fec & FEC_RS)
                eth_fec |= ETHTOOL_FEC_RS;
        if (cc_fec & FEC_BASER_RS)
                eth_fec |= ETHTOOL_FEC_BASER;

        /* if nothing is set, then FEC is off */
        if (!eth_fec)
                eth_fec = ETHTOOL_FEC_OFF;

        return eth_fec;
}
static int cxgb4vf_get_fecparam(struct net_device *dev,
                                struct ethtool_fecparam *fec)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct link_config *lc = &pi->link_cfg;

        /* Translate the Firmware FEC Support into the ethtool value.  We
         * always support IEEE 802.3 "automatic" selection of Link FEC type if
         * any FEC is supported.
         */
        fec->fec = fwcap_to_eth_fec(lc->pcaps);
        if (fec->fec != ETHTOOL_FEC_OFF)
                fec->fec |= ETHTOOL_FEC_AUTO;

        /* Translate the current internal FEC parameters into the
         * ethtool values.
         */
        fec->active_fec = cc_to_eth_fec(lc->fec);

        return 0;
}
/*
 * Return our driver information.
 */
static void cxgb4vf_get_drvinfo(struct net_device *dev,
                                struct ethtool_drvinfo *drvinfo)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
                sizeof(drvinfo->bus_info));
        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%u.%u.%u.%u, TP %u.%u.%u.%u",
                 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
                 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
                 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
                 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
                 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
                 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
                 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
                 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
}
/*
 * Return current adapter message level.
 */
static u32 cxgb4vf_get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}
/*
 * Set current adapter message level.
 */
static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
{
        netdev2adap(dev)->msg_enable = msglevel;
}
/*
 * Return the device's current Queue Set ring size parameters along with the
 * allowed maximum values.  Since ethtool doesn't understand the concept of
 * multi-queue devices, we just return the current values associated with the
 * first Queue Set.
 */
static void cxgb4vf_get_ringparam(struct net_device *dev,
                                  struct ethtool_ringparam *rp)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct sge *s = &pi->adapter->sge;

        rp->rx_max_pending = MAX_RX_BUFFERS;
        rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
        rp->rx_jumbo_max_pending = 0;
        rp->tx_max_pending = MAX_TXQ_ENTRIES;

        rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
        rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
        rp->rx_jumbo_pending = 0;
        rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
/*
 * Set the Queue Set ring size parameters for the device.  Again, since
 * ethtool doesn't allow for the concept of multiple queues per device, we'll
 * apply these new values across all of the Queue Sets associated with the
 * device -- after vetting them of course!
 */
static int cxgb4vf_set_ringparam(struct net_device *dev,
                                 struct ethtool_ringparam *rp)
{
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
        int qs;

        if (rp->rx_pending > MAX_RX_BUFFERS ||
            rp->rx_jumbo_pending ||
            rp->tx_pending > MAX_TXQ_ENTRIES ||
            rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
            rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
            rp->rx_pending < MIN_FL_ENTRIES ||
            rp->tx_pending < MIN_TXQ_ENTRIES)
                return -EINVAL;

        if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
                return -EBUSY;

        for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
                s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
                s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
                s->ethtxq[qs].q.size = rp->tx_pending;
        }
        return 0;
}
/*
 * Return the interrupt holdoff timer and count for the first Queue Set on the
 * device.  Our extension ioctl() (the cxgbtool interface) allows the
 * interrupt holdoff timer to be read on all of the device's Queue Sets.
 */
static int cxgb4vf_get_coalesce(struct net_device *dev,
                                struct ethtool_coalesce *coalesce)
{
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adapter = pi->adapter;
        const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;

        coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
        coalesce->rx_max_coalesced_frames =
                ((rspq->intr_params & QINTR_CNT_EN_F)
                 ? adapter->sge.counter_val[rspq->pktcnt_idx]
                 : 0);
        return 0;
}
/*
 * Set the RX interrupt holdoff timer and count for the first Queue Set on the
 * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
 * the interrupt holdoff timer on any of the device's Queue Sets.
 */
static int cxgb4vf_set_coalesce(struct net_device *dev,
                                struct ethtool_coalesce *coalesce)
{
        const struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;

        return set_rxq_intr_params(adapter,
                                   &adapter->sge.ethrxq[pi->first_qset].rspq,
                                   coalesce->rx_coalesce_usecs,
                                   coalesce->rx_max_coalesced_frames);
}
/*
 * Report current port link pause parameter settings.
 */
static void cxgb4vf_get_pauseparam(struct net_device *dev,
                                   struct ethtool_pauseparam *pauseparam)
{
        struct port_info *pi = netdev_priv(dev);

        pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
        pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
        pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
}
/*
 * Identify the port by blinking the port's LED.
 */
static int cxgb4vf_phys_id(struct net_device *dev,
                           enum ethtool_phys_id_state state)
{
        unsigned int val;
        struct port_info *pi = netdev_priv(dev);

        if (state == ETHTOOL_ID_ACTIVE)
                val = 0xffff;
        else if (state == ETHTOOL_ID_INACTIVE)
                val = 0;
        else
                return -EINVAL;

        return t4vf_identify_port(pi->adapter, pi->viid, val);
}
/*
 * Port stats maintained per queue of the port.  The fields mirror the
 * per-queue counters accumulated in collect_sge_port_stats() below.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 lro_pkts;
        u64 lro_merged;
};
/*
 * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
 * these need to match the order of statistics returned by
 * t4vf_get_port_stats().
 */
static const char stats_strings[][ETH_GSTRING_LEN] = {
        /*
         * These must match the layout of the t4vf_port_stats structure.
         */
        "TxBroadcastBytes  ",
        "TxBroadcastFrames ",
        "TxMulticastBytes  ",
        "TxMulticastFrames ",
        "TxUnicastBytes    ",
        "TxUnicastFrames   ",
        "TxDroppedFrames   ",
        "TxOffloadBytes    ",
        "TxOffloadFrames   ",
        "RxBroadcastBytes  ",
        "RxBroadcastFrames ",
        "RxMulticastBytes  ",
        "RxMulticastFrames ",
        "RxUnicastBytes    ",
        "RxUnicastFrames   ",
        "RxErrorFrames     ",

        /*
         * These are accumulated per-queue statistics and must match the
         * order of the fields in the queue_port_stats structure.
         */
        "TSO               ",
        "TxCsumOffload     ",
        "RxCsumGood        ",
        "VLANextractions   ",
        "VLANinsertions    ",
        "GROPackets        ",
        "GROMerged         ",
};
1771 static int cxgb4vf_get_sset_count(struct net_device
*dev
, int sset
)
1775 return ARRAY_SIZE(stats_strings
);
1783 * Return the strings for the specified statistics set.
1785 static void cxgb4vf_get_strings(struct net_device
*dev
,
1791 memcpy(data
, stats_strings
, sizeof(stats_strings
));
/*
 * Small utility routine to accumulate queue statistics across the queues of
 * a "port" ...
 */
static void collect_sge_port_stats(const struct adapter *adapter,
                                   const struct port_info *pi,
                                   struct queue_port_stats *stats)
{
        const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
        const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
        int qs;

        memset(stats, 0, sizeof(*stats));
        for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
                stats->tso += txq->tso;
                stats->tx_csum += txq->tx_cso;
                stats->rx_csum += rxq->stats.rx_cso;
                stats->vlan_ex += rxq->stats.vlan_ex;
                stats->vlan_ins += txq->vlan_ins;
                stats->lro_pkts += rxq->stats.lro_pkts;
                stats->lro_merged += rxq->stats.lro_merged;
        }
}
/*
 * Return the ETH_SS_STATS statistics set.
 */
static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats,
                                      u64 *data)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        int err = t4vf_get_port_stats(adapter, pi->pidx,
                                      (struct t4vf_port_stats *)data);
        if (err)
                memset(data, 0, sizeof(struct t4vf_port_stats));

        data += sizeof(struct t4vf_port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}
/*
 * Return the size of our register map.
 */
static int cxgb4vf_get_regs_len(struct net_device *dev)
{
        return T4VF_REGMAP_SIZE;
}
/*
 * Dump a block of registers, start to end inclusive, into a buffer.
 */
static void reg_block_dump(struct adapter *adapter, void *regbuf,
                           unsigned int start, unsigned int end)
{
        u32 *bp = regbuf + start - T4VF_REGMAP_START;

        for ( ; start <= end; start += sizeof(u32)) {
                /*
                 * Avoid reading the Mailbox Control register since that
                 * can trigger a Mailbox Ownership Arbitration cycle and
                 * interfere with communication with the firmware.
                 */
                if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
                        *bp++ = 0xffff;
                else
                        *bp++ = t4_read_reg(adapter, start);
        }
}
/*
 * Copy our entire register map into the provided buffer.
 */
static void cxgb4vf_get_regs(struct net_device *dev,
                             struct ethtool_regs *regs,
                             void *regbuf)
{
        struct adapter *adapter = netdev2adap(dev);

        regs->version = mk_adap_vers(adapter);

        /*
         * Fill in register buffer with our register map.
         */
        memset(regbuf, 0, T4VF_REGMAP_SIZE);

        reg_block_dump(adapter, regbuf,
                       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
                       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
        reg_block_dump(adapter, regbuf,
                       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
                       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);

        /* T5 adds new registers in the PL Register map.
         */
        reg_block_dump(adapter, regbuf,
                       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
                       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
                                            ? PL_VF_WHOAMI_A
                                            : PL_VF_REVISION_A));
        reg_block_dump(adapter, regbuf,
                       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
                       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);

        reg_block_dump(adapter, regbuf,
                       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
                       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
}
/*
 * Report current Wake On LAN settings.
 */
static void cxgb4vf_get_wol(struct net_device *dev,
                            struct ethtool_wolinfo *wol)
{
        wol->supported = 0;
        wol->wolopts = 0;
        memset(&wol->sopass, 0, sizeof(wol->sopass));
}
/*
 * TCP Segmentation Offload flags which we support.
 */
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                   NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static const struct ethtool_ops cxgb4vf_ethtool_ops = {
        .get_link_ksettings     = cxgb4vf_get_link_ksettings,
        .get_fecparam           = cxgb4vf_get_fecparam,
        .get_drvinfo            = cxgb4vf_get_drvinfo,
        .get_msglevel           = cxgb4vf_get_msglevel,
        .set_msglevel           = cxgb4vf_set_msglevel,
        .get_ringparam          = cxgb4vf_get_ringparam,
        .set_ringparam          = cxgb4vf_set_ringparam,
        .get_coalesce           = cxgb4vf_get_coalesce,
        .set_coalesce           = cxgb4vf_set_coalesce,
        .get_pauseparam         = cxgb4vf_get_pauseparam,
        .get_link               = ethtool_op_get_link,
        .get_strings            = cxgb4vf_get_strings,
        .set_phys_id            = cxgb4vf_phys_id,
        .get_sset_count         = cxgb4vf_get_sset_count,
        .get_ethtool_stats      = cxgb4vf_get_ethtool_stats,
        .get_regs_len           = cxgb4vf_get_regs_len,
        .get_regs               = cxgb4vf_get_regs,
        .get_wol                = cxgb4vf_get_wol,
};
1945 * /sys/kernel/debug/cxgb4vf support code and data.
1946 * ================================================
1950 * Show Firmware Mailbox Command/Reply Log
1952 * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1953 * it's possible that we can catch things during a log update and therefore
1954 * see partially corrupted log entries. But i9t's probably Good Enough(tm).
1955 * If we ever decide that we want to make sure that we're dumping a coherent
1956 * log, we'd need to perform locking in the mailbox logging and in
1957 * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1958 * like we do for the Firmware Device Log. But as stated above, meh ...
1960 static int mboxlog_show(struct seq_file
*seq
, void *v
)
1962 struct adapter
*adapter
= seq
->private;
1963 struct mbox_cmd_log
*log
= adapter
->mbox_log
;
1964 struct mbox_cmd
*entry
;
1967 if (v
== SEQ_START_TOKEN
) {
1969 "%10s %15s %5s %5s %s\n",
1970 "Seq#", "Tstamp", "Atime", "Etime",
1975 entry_idx
= log
->cursor
+ ((uintptr_t)v
- 2);
1976 if (entry_idx
>= log
->size
)
1977 entry_idx
-= log
->size
;
1978 entry
= mbox_cmd_log_entry(log
, entry_idx
);
1980 /* skip over unused entries */
1981 if (entry
->timestamp
== 0)
1984 seq_printf(seq
, "%10u %15llu %5d %5d",
1985 entry
->seqno
, entry
->timestamp
,
1986 entry
->access
, entry
->execute
);
1987 for (i
= 0; i
< MBOX_LEN
/ 8; i
++) {
1988 u64 flit
= entry
->cmd
[i
];
1989 u32 hi
= (u32
)(flit
>> 32);
1992 seq_printf(seq
, " %08x %08x", hi
, lo
);
1994 seq_puts(seq
, "\n");
1998 static inline void *mboxlog_get_idx(struct seq_file
*seq
, loff_t pos
)
2000 struct adapter
*adapter
= seq
->private;
2001 struct mbox_cmd_log
*log
= adapter
->mbox_log
;
2003 return ((pos
<= log
->size
) ? (void *)(uintptr_t)(pos
+ 1) : NULL
);
2006 static void *mboxlog_start(struct seq_file
*seq
, loff_t
*pos
)
2008 return *pos
? mboxlog_get_idx(seq
, *pos
) : SEQ_START_TOKEN
;
2011 static void *mboxlog_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2014 return mboxlog_get_idx(seq
, *pos
);
2017 static void mboxlog_stop(struct seq_file
*seq
, void *v
)
2021 static const struct seq_operations mboxlog_seq_ops
= {
2022 .start
= mboxlog_start
,
2023 .next
= mboxlog_next
,
2024 .stop
= mboxlog_stop
,
2025 .show
= mboxlog_show
2028 static int mboxlog_open(struct inode
*inode
, struct file
*file
)
2030 int res
= seq_open(file
, &mboxlog_seq_ops
);
2033 struct seq_file
*seq
= file
->private_data
;
2035 seq
->private = inode
->i_private
;
2040 static const struct file_operations mboxlog_fops
= {
2041 .owner
= THIS_MODULE
,
2042 .open
= mboxlog_open
,
2044 .llseek
= seq_lseek
,
2045 .release
= seq_release
,
2049 * Show SGE Queue Set information. We display QPL Queues Sets per line.
2053 static int sge_qinfo_show(struct seq_file
*seq
, void *v
)
2055 struct adapter
*adapter
= seq
->private;
2056 int eth_entries
= DIV_ROUND_UP(adapter
->sge
.ethqsets
, QPL
);
2057 int qs
, r
= (uintptr_t)v
- 1;
2060 seq_putc(seq
, '\n');
2062 #define S3(fmt_spec, s, v) \
2064 seq_printf(seq, "%-12s", s); \
2065 for (qs = 0; qs < n; ++qs) \
2066 seq_printf(seq, " %16" fmt_spec, v); \
2067 seq_putc(seq, '\n'); \
2069 #define S(s, v) S3("s", s, v)
2070 #define T(s, v) S3("u", s, txq[qs].v)
2071 #define R(s, v) S3("u", s, rxq[qs].v)
2073 if (r
< eth_entries
) {
2074 const struct sge_eth_rxq
*rxq
= &adapter
->sge
.ethrxq
[r
* QPL
];
2075 const struct sge_eth_txq
*txq
= &adapter
->sge
.ethtxq
[r
* QPL
];
2076 int n
= min(QPL
, adapter
->sge
.ethqsets
- QPL
* r
);
2078 S("QType:", "Ethernet");
2080 (rxq
[qs
].rspq
.netdev
2081 ? rxq
[qs
].rspq
.netdev
->name
2084 (rxq
[qs
].rspq
.netdev
2085 ? ((struct port_info
*)
2086 netdev_priv(rxq
[qs
].rspq
.netdev
))->port_id
2088 T("TxQ ID:", q
.abs_id
);
2089 T("TxQ size:", q
.size
);
2090 T("TxQ inuse:", q
.in_use
);
2091 T("TxQ PIdx:", q
.pidx
);
2092 T("TxQ CIdx:", q
.cidx
);
2093 R("RspQ ID:", rspq
.abs_id
);
2094 R("RspQ size:", rspq
.size
);
2095 R("RspQE size:", rspq
.iqe_len
);
2096 S3("u", "Intr delay:", qtimer_val(adapter
, &rxq
[qs
].rspq
));
2097 S3("u", "Intr pktcnt:",
2098 adapter
->sge
.counter_val
[rxq
[qs
].rspq
.pktcnt_idx
]);
2099 R("RspQ CIdx:", rspq
.cidx
);
2100 R("RspQ Gen:", rspq
.gen
);
2101 R("FL ID:", fl
.abs_id
);
2102 R("FL size:", fl
.size
- MIN_FL_RESID
);
2103 R("FL avail:", fl
.avail
);
2104 R("FL PIdx:", fl
.pidx
);
2105 R("FL CIdx:", fl
.cidx
);
2111 const struct sge_rspq
*evtq
= &adapter
->sge
.fw_evtq
;
2113 seq_printf(seq
, "%-12s %16s\n", "QType:", "FW event queue");
2114 seq_printf(seq
, "%-12s %16u\n", "RspQ ID:", evtq
->abs_id
);
2115 seq_printf(seq
, "%-12s %16u\n", "Intr delay:",
2116 qtimer_val(adapter
, evtq
));
2117 seq_printf(seq
, "%-12s %16u\n", "Intr pktcnt:",
2118 adapter
->sge
.counter_val
[evtq
->pktcnt_idx
]);
2119 seq_printf(seq
, "%-12s %16u\n", "RspQ Cidx:", evtq
->cidx
);
2120 seq_printf(seq
, "%-12s %16u\n", "RspQ Gen:", evtq
->gen
);
2121 } else if (r
== 1) {
2122 const struct sge_rspq
*intrq
= &adapter
->sge
.intrq
;
2124 seq_printf(seq
, "%-12s %16s\n", "QType:", "Interrupt Queue");
2125 seq_printf(seq
, "%-12s %16u\n", "RspQ ID:", intrq
->abs_id
);
2126 seq_printf(seq
, "%-12s %16u\n", "Intr delay:",
2127 qtimer_val(adapter
, intrq
));
2128 seq_printf(seq
, "%-12s %16u\n", "Intr pktcnt:",
2129 adapter
->sge
.counter_val
[intrq
->pktcnt_idx
]);
2130 seq_printf(seq
, "%-12s %16u\n", "RspQ Cidx:", intrq
->cidx
);
2131 seq_printf(seq
, "%-12s %16u\n", "RspQ Gen:", intrq
->gen
);
2143 * Return the number of "entries" in our "file". We group the multi-Queue
2144 * sections with QPL Queue Sets per "entry". The sections of the output are:
2146 * Ethernet RX/TX Queue Sets
2147 * Firmware Event Queue
2148 * Forwarded Interrupt Queue (if in MSI mode)
2150 static int sge_queue_entries(const struct adapter
*adapter
)
2152 return DIV_ROUND_UP(adapter
->sge
.ethqsets
, QPL
) + 1 +
2153 ((adapter
->flags
& CXGB4VF_USING_MSI
) != 0);
2156 static void *sge_queue_start(struct seq_file
*seq
, loff_t
*pos
)
2158 int entries
= sge_queue_entries(seq
->private);
2160 return *pos
< entries
? (void *)((uintptr_t)*pos
+ 1) : NULL
;
2163 static void sge_queue_stop(struct seq_file
*seq
, void *v
)
2167 static void *sge_queue_next(struct seq_file
*seq
, void *v
, loff_t
*pos
)
2169 int entries
= sge_queue_entries(seq
->private);
2172 return *pos
< entries
? (void *)((uintptr_t)*pos
+ 1) : NULL
;
2175 static const struct seq_operations sge_qinfo_seq_ops
= {
2176 .start
= sge_queue_start
,
2177 .next
= sge_queue_next
,
2178 .stop
= sge_queue_stop
,
2179 .show
= sge_qinfo_show
2182 static int sge_qinfo_open(struct inode
*inode
, struct file
*file
)
2184 int res
= seq_open(file
, &sge_qinfo_seq_ops
);
2187 struct seq_file
*seq
= file
->private_data
;
2188 seq
->private = inode
->i_private
;
2193 static const struct file_operations sge_qinfo_debugfs_fops
= {
2194 .owner
= THIS_MODULE
,
2195 .open
= sge_qinfo_open
,
2197 .llseek
= seq_lseek
,
2198 .release
= seq_release
,
/*
 * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
 */
static int sge_qstats_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
	int qs, r = (uintptr_t)v - 1;

	if (r)
		seq_putc(seq, '\n');

#define S3(fmt, s, v) \
	do { \
		seq_printf(seq, "%-16s", s); \
		for (qs = 0; qs < n; ++qs) \
			seq_printf(seq, " %8" fmt, v); \
		seq_putc(seq, '\n'); \
	} while (0)
#define S(s, v)		S3("s", s, v)

#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
#define T(s, v)		T3("lu", s, v)

#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
#define R(s, v)		R3("lu", s, v)
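
/*
 * To make the macro nesting above concrete: R("RxPackets:", stats.pkts)
 * expands to R3("lu", "RxPackets:", stats.pkts), which expands to
 * S3("lu", "RxPackets:", rxq[qs].stats.pkts) -- one output line labeled
 * "RxPackets:" with a "%8lu"-formatted value for each of the n Queue Sets
 * in the current group.
 */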
	if (r < eth_entries) {
		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
		int n = min(QPL, adapter->sge.ethqsets - QPL * r);

		S("QType:", "Ethernet");
		S("Interface:",
		  (rxq[qs].rspq.netdev
		   ? rxq[qs].rspq.netdev->name
		   : "N/A"));
		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
		R("RxPackets:", stats.pkts);
		R("RxCSO:", stats.rx_cso);
		R("VLANxtract:", stats.vlan_ex);
		R("LROmerged:", stats.lro_merged);
		R("LROpackets:", stats.lro_pkts);
		R("RxDrops:", stats.rx_drops);
		T("TSO:", tso);
		T("TxCSO:", tx_cso);
		T("VLANins:", vlan_ins);
		T("TxQFull:", q.stops);
		T("TxQRestarts:", q.restarts);
		T("TxMapErr:", mapping_err);
		R("FLAllocErr:", fl.alloc_failed);
		R("FLLrgAlcErr:", fl.large_alloc_failed);
		R("FLStarving:", fl.starving);
		return 0;
	}

	r -= eth_entries;
	if (r == 0) {
		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   evtq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
	} else if (r == 1) {
		const struct sge_rspq *intrq = &adapter->sge.intrq;

		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
			   intrq->unhandled_irqs);
		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
	}

#undef R
#undef T
#undef S
#undef R3
#undef T3
#undef S3

	return 0;
}
/*
 * Return the number of "entries" in our "file".  We group the multi-Queue
 * sections with QPL Queue Sets per "entry".  The sections of the output are:
 *
 *     Ethernet RX/TX Queue Sets
 *     Firmware Event Queue
 *     Forwarded Interrupt Queue (if in MSI mode)
 */
static int sge_qstats_entries(const struct adapter *adapter)
{
	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
	       ((adapter->flags & CXGB4VF_USING_MSI) != 0);
}
static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
{
	int entries = sge_qstats_entries(seq->private);

	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static void sge_qstats_stop(struct seq_file *seq, void *v)
{
}

static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int entries = sge_qstats_entries(seq->private);

	++*pos;
	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}

static const struct seq_operations sge_qstats_seq_ops = {
	.start = sge_qstats_start,
	.next  = sge_qstats_next,
	.stop  = sge_qstats_stop,
	.show  = sge_qstats_show
};

static int sge_qstats_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &sge_qstats_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations sge_qstats_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = sge_qstats_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
/*
 * Show PCI-E SR-IOV Virtual Function Resource Limits.
 */
static int resources_show(struct seq_file *seq, void *v)
{
	struct adapter *adapter = seq->private;
	struct vf_resources *vfres = &adapter->params.vfres;

#define S(desc, fmt, var) \
	seq_printf(seq, "%-60s " fmt "\n", \
		   desc " (" #var "):", vfres->var)

	S("Virtual Interfaces", "%d", nvi);
	S("Egress Queues", "%d", neq);
	S("Ethernet Control", "%d", nethctrl);
	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
	S("Ingress Queues", "%d", niq);
	S("Traffic Class", "%d", tc);
	S("Port Access Rights Mask", "%#x", pmask);
	S("MAC Address Filters", "%d", nexactf);
	S("Firmware Command Read Capabilities", "%#x", r_caps);
	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);

#undef S

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(resources);
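
/*
 * For reference, each S() invocation above emits one line of the
 * "resources" debugfs file of the form (the value shown is hypothetical):
 *
 *   Virtual Interfaces (nvi):                                    1
 */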
/*
 * Show Virtual Interfaces.
 */
static int interfaces_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Interface Port VIID\n");
	} else {
		struct adapter *adapter = seq->private;
		int pidx = (uintptr_t)v - 2;
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);

		seq_printf(seq, "%9s %4d %#5x\n",
			   dev->name, pi->port_id, pi->viid);
	}
	return 0;
}

static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
{
	return pos <= adapter->params.nports
		? (void *)(uintptr_t)(pos + 1)
		: NULL;
}

static void *interfaces_start(struct seq_file *seq, loff_t *pos)
{
	return *pos
		? interfaces_get_idx(seq->private, *pos)
		: SEQ_START_TOKEN;
}

static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return interfaces_get_idx(seq->private, *pos);
}

static void interfaces_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations interfaces_seq_ops = {
	.start = interfaces_start,
	.next  = interfaces_next,
	.stop  = interfaces_stop,
	.show  = interfaces_show
};

static int interfaces_open(struct inode *inode, struct file *file)
{
	int res = seq_open(file, &interfaces_seq_ops);

	if (!res) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;
	}
	return res;
}

static const struct file_operations interfaces_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = interfaces_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
/*
 * /sys/kernel/debug/cxgb4vf files list.
 */
struct cxgb4vf_debugfs_entry {
	const char *name;		/* name of debugfs node */
	umode_t mode;			/* file system mode */
	const struct file_operations *fops;
};

static struct cxgb4vf_debugfs_entry debugfs_files[] = {
	{ "mboxlog",    0444, &mboxlog_fops },
	{ "sge_qinfo",  0444, &sge_qinfo_debugfs_fops },
	{ "sge_qstats", 0444, &sge_qstats_proc_fops },
	{ "resources",  0444, &resources_fops },
	{ "interfaces", 0444, &interfaces_proc_fops },
};
/*
 * Module and device initialization and cleanup code.
 * ==================================================
 */

/*
 * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
 * directory (debugfs_root) has already been set up.
 */
static int setup_debugfs(struct adapter *adapter)
{
	int i;

	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Debugfs support is best effort.
	 */
	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
		debugfs_create_file(debugfs_files[i].name,
				    debugfs_files[i].mode,
				    adapter->debugfs_root, (void *)adapter,
				    debugfs_files[i].fops);

	return 0;
}

/*
 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
 * it to our caller to tear down the directory (debugfs_root).
 */
static void cleanup_debugfs(struct adapter *adapter)
{
	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));

	/*
	 * Unlike our sister routine cleanup_proc(), we don't need to remove
	 * individual entries because a call will be made to
	 * debugfs_remove_recursive().  We just need to clean up any ancillary
	 * persistent state.
	 */
	/* nothing to do */
}
/* Figure out how many Ports and Queue Sets we can support.  This depends on
 * knowing our Virtual Function Resources and may be called a second time if
 * we fall back from MSI-X to MSI Interrupt Mode.
 */
static void size_nports_qsets(struct adapter *adapter)
{
	struct vf_resources *vfres = &adapter->params.vfres;
	unsigned int ethqsets, pmask_nports;

	/* The number of "ports" which we support is equal to the number of
	 * Virtual Interfaces with which we've been provisioned.
	 */
	adapter->params.nports = vfres->nvi;
	if (adapter->params.nports > MAX_NPORTS) {
		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
			 " allowed virtual interfaces\n", MAX_NPORTS,
			 adapter->params.nports);
		adapter->params.nports = MAX_NPORTS;
	}

	/* We may have been provisioned with more VIs than the number of
	 * ports we're allowed to access (our Port Access Rights Mask).
	 * This is obviously a configuration conflict but we don't want to
	 * crash the kernel or anything silly just because of that.
	 */
	pmask_nports = hweight32(adapter->params.vfres.pmask);
	if (pmask_nports < adapter->params.nports) {
		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
			 " virtual interfaces; limited by Port Access Rights"
			 " mask %#x\n", pmask_nports, adapter->params.nports,
			 adapter->params.vfres.pmask);
		adapter->params.nports = pmask_nports;
	}

	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
	 * reserve an Ingress Queue for a Forwarded Interrupt Queue.
	 *
	 * The rest of the FL/Intr-capable ingress queues will be matched up
	 * one-for-one with Ethernet/Control egress queues in order to form
	 * "Queue Sets" which will be apportioned between the "ports".  For
	 * each Queue Set, we'll need the ability to allocate two Egress
	 * Contexts -- one for the Ingress Queue Free List and one for the TX
	 * Ethernet Queue.
	 *
	 * Note that even if we're currently configured to use MSI-X
	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
	 * happens we'll need to adjust things later.
	 */
	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
	if (vfres->nethctrl != ethqsets)
		ethqsets = min(vfres->nethctrl, ethqsets);
	if (vfres->neq < ethqsets * 2)
		ethqsets = vfres->neq / 2;
	if (ethqsets > MAX_ETH_QSETS)
		ethqsets = MAX_ETH_QSETS;
	adapter->sge.max_ethqsets = ethqsets;

	if (adapter->sge.max_ethqsets < adapter->params.nports) {
		dev_warn(adapter->pdev_dev, "only using %d of %d available"
			 " virtual interfaces (too few Queue Sets)\n",
			 adapter->sge.max_ethqsets, adapter->params.nports);
		adapter->params.nports = adapter->sge.max_ethqsets;
	}
}
/*
 * Perform early "adapter" initialization.  This is where we discover what
 * adapter parameters we're going to be using and initialize basic adapter
 * state.
 */
static int adap_init0(struct adapter *adapter)
{
	struct sge_params *sge_params = &adapter->params.sge;
	struct sge *s = &adapter->sge;
	int err;
	u32 param, val = 0;

	/*
	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
	 * 2.6.31 and later we can't call pci_reset_function() in order to
	 * issue an FLR because of a self-deadlock on the device semaphore.
	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
	 * cases where they're needed -- for instance, some versions of KVM
	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
	 * use the firmware based reset in order to reset any per function
	 * state.
	 */
	err = t4vf_fw_reset(adapter);
	if (err < 0) {
		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
		return err;
	}

	/*
	 * Grab basic operational parameters.  These will predominantly have
	 * been set up by the Physical Function Driver or will be hard coded
	 * into the adapter.  We just have to live with them ...  Note that
	 * we _must_ get our VPD parameters before our SGE parameters because
	 * we need to know the adapter's core clock from the VPD in order to
	 * properly decode the SGE Timer Values.
	 */
	err = t4vf_get_dev_params(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" device parameters: err=%d\n", err);
		return err;
	}
	err = t4vf_get_vpd_params(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" VPD parameters: err=%d\n", err);
		return err;
	}
	err = t4vf_get_sge_params(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" SGE parameters: err=%d\n", err);
		return err;
	}
	err = t4vf_get_rss_glb_config(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
			" RSS parameters: err=%d\n", err);
		return err;
	}
	if (adapter->params.rss.mode !=
	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
			" mode %d\n", adapter->params.rss.mode);
		return -EINVAL;
	}
	err = t4vf_sge_init(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
			" err=%d\n", err);
		return err;
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
	val = 1;
	(void) t4vf_set_params(adapter, 1, &param, &val);

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.
	 */
	s->timer_val[0] = core_ticks_to_us(adapter,
		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adapter,
		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adapter,
		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adapter,
		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adapter,
		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adapter,
		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));

	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);

	/*
	 * Grab our Virtual Interface resource allocation, extract the
	 * features that we're interested in and do a bit of sanity testing on
	 * what we discover.
	 */
	err = t4vf_get_vfres(adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "unable to get virtual interface"
			" resources: err=%d\n", err);
		return err;
	}

	/* Check for various parameter sanity issues */
	if (adapter->params.vfres.pmask == 0) {
		dev_err(adapter->pdev_dev, "no port access configured\n");
		return -EINVAL;
	}
	if (adapter->params.vfres.nvi == 0) {
		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
			"usable!\n");
		return -EINVAL;
	}

	/* Initialize nports and max_ethqsets now that we have our Virtual
	 * Function Resources.
	 */
	size_nports_qsets(adapter);

	adapter->flags |= CXGB4VF_FW_OK;
	return 0;
}
static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
			     u8 pkt_cnt_idx, unsigned int size,
			     unsigned int iqe_size)
{
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     (pkt_cnt_idx < SGE_NCOUNTERS ?
			      QINTR_CNT_EN_F : 0));
	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
			    ? pkt_cnt_idx
			    : 0);
	rspq->iqe_len = iqe_size;
	rspq->size = size;
}
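
/*
 * An illustrative call (mirroring the Ethernet RX queue defaults used in
 * cfg_queues() below): init_rspq(&rxq->rspq, 0, 0, 1024, 64) selects
 * holdoff timer index 0, enables the packet count threshold with counter
 * index 0 (since 0 < SGE_NCOUNTERS), and sizes the queue at 1024 entries
 * of 64 bytes each.
 */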
/*
 * Perform default configuration of DMA queues depending on the number and
 * type of ports we found and the number of available CPUs.  Most settings can
 * be modified by the admin via ethtool and cxgbtool prior to the adapter
 * being brought up for the first time.
 */
static void cfg_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int q10g, n10g, qidx, pidx, qs;
	size_t iqe_size;

	/*
	 * We should not be called till we know how many Queue Sets we can
	 * support.  In particular, this means that we need to know what kind
	 * of interrupts we'll be using ...
	 */
	BUG_ON((adapter->flags &
		(CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);

	/*
	 * Count the number of 10GbE Virtual Interfaces that we have.
	 */
	n10g = 0;
	for_each_port(adapter, pidx)
		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g == 0)
		q10g = 0;
	else {
		int n1g = (adapter->params.nports - n10g);
		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
		if (q10g > num_online_cpus())
			q10g = num_online_cpus();
	}

	/*
	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
	 * The layout will be established in setup_sge_queues() when the
	 * adapter is brought up for the first time.
	 */
	qidx = 0;
	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
	s->ethqsets = qidx;

	/*
	 * The Ingress Queue Entry Size for our various Response Queues needs
	 * to be big enough to accommodate the largest message we can receive
	 * from the chip/firmware; which is 64 bytes ...
	 */
	iqe_size = 64;

	/*
	 * Set up default Queue Set parameters ... Start off with the
	 * shortest interrupt holdoff timer.
	 */
	for (qs = 0; qs < s->max_ethqsets; qs++) {
		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
		struct sge_eth_txq *txq = &s->ethtxq[qs];

		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
		rxq->fl.size = 72;
		txq->q.size = 1024;
	}

	/*
	 * The firmware event queue is used for link state changes and
	 * notifications of TX DMA completions.
	 */
	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);

	/*
	 * The forwarded interrupt queue is used when we're in MSI interrupt
	 * mode.  In this mode all interrupts associated with RX queues will
	 * be forwarded to a single queue which we'll associate with our MSI
	 * interrupt vector.  The messages dropped in the forwarded interrupt
	 * queue will indicate which ingress queue needs servicing ...  This
	 * queue needs to be large enough to accommodate all of the ingress
	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
	 * from equalling the CIDX if every ingress queue has an outstanding
	 * interrupt).  The queue doesn't need to be any larger because no
	 * ingress queue will ever have more than one outstanding interrupt at
	 * any time ...
	 */
	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
		  iqe_size);
}
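
/*
 * A worked example of the Queue Set allocation above (hypothetical
 * numbers): with 2 ports of which one is 10G (n10g = 1, n1g = 1) and
 * max_ethqsets = 8, q10g = (8 - 1) / 1 = 7, possibly clamped to
 * num_online_cpus(); the 10G port then gets q10g Queue Sets and the
 * non-10G port gets 1.
 */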
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adapter, int n)
{
	int i;
	struct port_info *pi;

	/*
	 * While we have too many active Ethernet Queue Sets, iterate across
	 * the "ports" and reduce their individual Queue Set allocations.
	 */
	BUG_ON(n < adapter->params.nports);
	while (n < adapter->sge.ethqsets)
		for_each_port(adapter, i) {
			pi = adap2pinfo(adapter, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adapter->sge.ethqsets--;
				if (adapter->sge.ethqsets <= n)
					break;
			}
		}

	/*
	 * Reassign the starting Queue Sets for each of the "ports" ...
	 */
	n = 0;
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
/*
 * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
 * we get a separate MSI-X vector for every "Queue Set" plus any extras we
 * need.  Minimally we need one for every Virtual Interface plus those needed
 * for our "extras".  Note that this process may lower the maximum number of
 * allowed Queue Sets ...
 */
static int enable_msix(struct adapter *adapter)
{
	int i, want, need, nqsets;
	struct msix_entry entries[MSIX_ENTRIES];
	struct sge *s = &adapter->sge;

	for (i = 0; i < MSIX_ENTRIES; ++i)
		entries[i].entry = i;

	/*
	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
	 * plus those needed for our "extras" (for example, the firmware
	 * message queue).  We _need_ at least one "Queue Set" per Virtual
	 * Interface plus those needed for our "extras".  So now we get to see
	 * if the song is right ...
	 */
	want = s->max_ethqsets + MSIX_EXTRAS;
	need = adapter->params.nports + MSIX_EXTRAS;

	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
	if (want < 0)
		return want;

	nqsets = want - MSIX_EXTRAS;
	if (nqsets < s->max_ethqsets) {
		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
			 " for %d Queue Sets\n", nqsets);
		s->max_ethqsets = nqsets;
		if (nqsets < s->ethqsets)
			reduce_ethqs(adapter, nqsets);
	}
	for (i = 0; i < want; ++i)
		adapter->msix_info[i].vec = entries[i].vector;

	return 0;
}
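
/*
 * To illustrate the want/need arithmetic above (hypothetical numbers):
 * with max_ethqsets = 8, nports = 2 and MSIX_EXTRAS = 1 we ask
 * pci_enable_msix_range() for between 3 and 9 vectors.  If only 6 are
 * granted, nqsets becomes 5 and reduce_ethqs() trims the per-port Queue
 * Set allocations down to fit.
 */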
static const struct net_device_ops cxgb4vf_netdev_ops	= {
	.ndo_open		= cxgb4vf_open,
	.ndo_stop		= cxgb4vf_stop,
	.ndo_start_xmit		= t4vf_eth_xmit,
	.ndo_get_stats		= cxgb4vf_get_stats,
	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
	.ndo_change_mtu		= cxgb4vf_change_mtu,
	.ndo_fix_features	= cxgb4vf_fix_features,
	.ndo_set_features	= cxgb4vf_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb4vf_poll_controller,
#endif
};
2921 * "Probe" a device: initialize a device and construct all kernel and driver
2922 * state needed to manage the device. This routine is called "init_one" in
2925 static int cxgb4vf_pci_probe(struct pci_dev
*pdev
,
2926 const struct pci_device_id
*ent
)
2931 struct adapter
*adapter
;
2932 struct port_info
*pi
;
2933 struct net_device
*netdev
;
2937 * Print our driver banner the first time we're called to initialize a
2940 pr_info_once("%s - version %s\n", DRV_DESC
, DRV_VERSION
);
2943 * Initialize generic PCI device state.
2945 err
= pci_enable_device(pdev
);
2947 dev_err(&pdev
->dev
, "cannot enable PCI device\n");
2952 * Reserve PCI resources for the device. If we can't get them some
2953 * other driver may have already claimed the device ...
2955 err
= pci_request_regions(pdev
, KBUILD_MODNAME
);
2957 dev_err(&pdev
->dev
, "cannot obtain PCI resources\n");
2958 goto err_disable_device
;
2962 * Set up our DMA mask: try for 64-bit address masking first and
2963 * fall back to 32-bit if we can't get 64 bits ...
2965 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
2967 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
2969 dev_err(&pdev
->dev
, "unable to obtain 64-bit DMA for"
2970 " coherent allocations\n");
2971 goto err_release_regions
;
2975 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
2977 dev_err(&pdev
->dev
, "no usable DMA configuration\n");
2978 goto err_release_regions
;
2984 * Enable bus mastering for the device ...
2986 pci_set_master(pdev
);
2989 * Allocate our adapter data structure and attach it to the device.
2991 adapter
= kzalloc(sizeof(*adapter
), GFP_KERNEL
);
2994 goto err_release_regions
;
2996 pci_set_drvdata(pdev
, adapter
);
2997 adapter
->pdev
= pdev
;
2998 adapter
->pdev_dev
= &pdev
->dev
;
3000 adapter
->mbox_log
= kzalloc(sizeof(*adapter
->mbox_log
) +
3001 (sizeof(struct mbox_cmd
) *
3002 T4VF_OS_LOG_MBOX_CMDS
),
3004 if (!adapter
->mbox_log
) {
3006 goto err_free_adapter
;
3008 adapter
->mbox_log
->size
= T4VF_OS_LOG_MBOX_CMDS
;
3011 * Initialize SMP data synchronization resources.
3013 spin_lock_init(&adapter
->stats_lock
);
3014 spin_lock_init(&adapter
->mbox_lock
);
3015 INIT_LIST_HEAD(&adapter
->mlist
.list
);
3018 * Map our I/O registers in BAR0.
3020 adapter
->regs
= pci_ioremap_bar(pdev
, 0);
3021 if (!adapter
->regs
) {
3022 dev_err(&pdev
->dev
, "cannot map device registers\n");
3024 goto err_free_adapter
;
3027 /* Wait for the device to become ready before proceeding ...
3029 err
= t4vf_prep_adapter(adapter
);
3031 dev_err(adapter
->pdev_dev
, "device didn't become ready:"
3033 goto err_unmap_bar0
;
3036 /* For T5 and later we want to use the new BAR-based User Doorbells,
3037 * so we need to map BAR2 here ...
3039 if (!is_t4(adapter
->params
.chip
)) {
3040 adapter
->bar2
= ioremap_wc(pci_resource_start(pdev
, 2),
3041 pci_resource_len(pdev
, 2));
3042 if (!adapter
->bar2
) {
3043 dev_err(adapter
->pdev_dev
, "cannot map BAR2 doorbells\n");
3045 goto err_unmap_bar0
;
3049 * Initialize adapter level features.
3051 adapter
->name
= pci_name(pdev
);
3052 adapter
->msg_enable
= DFLT_MSG_ENABLE
;
3054 /* If possible, we use PCIe Relaxed Ordering Attribute to deliver
3055 * Ingress Packet Data to Free List Buffers in order to allow for
3056 * chipset performance optimizations between the Root Complex and
3057 * Memory Controllers. (Messages to the associated Ingress Queue
3058 * notifying new Packet Placement in the Free Lists Buffers will be
3059 * send without the Relaxed Ordering Attribute thus guaranteeing that
3060 * all preceding PCIe Transaction Layer Packets will be processed
3061 * first.) But some Root Complexes have various issues with Upstream
3062 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
3063 * The PCIe devices which under the Root Complexes will be cleared the
3064 * Relaxed Ordering bit in the configuration space, So we check our
3065 * PCIe configuration space to see if it's flagged with advice against
3066 * using Relaxed Ordering.
3068 if (!pcie_relaxed_ordering_enabled(pdev
))
3069 adapter
->flags
|= CXGB4VF_ROOT_NO_RELAXED_ORDERING
;
3071 err
= adap_init0(adapter
);
3074 "Adapter initialization failed, error %d. Continuing in debug mode\n",
3077 /* Initialize hash mac addr list */
3078 INIT_LIST_HEAD(&adapter
->mac_hlist
);
3081 * Allocate our "adapter ports" and stitch everything together.
3083 pmask
= adapter
->params
.vfres
.pmask
;
3084 pf
= t4vf_get_pf_from_vf(adapter
);
3085 for_each_port(adapter
, pidx
) {
3088 unsigned int naddr
= 1;
3091 * We simplistically allocate our virtual interfaces
3092 * sequentially across the port numbers to which we have
3093 * access rights. This should be configurable in some manner
3098 port_id
= ffs(pmask
) - 1;
3099 pmask
&= ~(1 << port_id
);
		/*
		 * Allocate our network device and stitch things together.
		 */
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_PORT_QSETS);
		if (netdev == NULL) {
			err = -ENOMEM;
			goto err_free_dev;
		}
		adapter->port[pidx] = netdev;
		SET_NETDEV_DEV(netdev, &pdev->dev);
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->pidx = pidx;
		pi->port_id = port_id;

		/*
		 * Initialize the starting state of our "port" and register
		 * it.
		 */
		pi->xact_addr_filt = -1;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		netdev->features = netdev->hw_features;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;
		netdev->min_mtu = 81;
		netdev->max_mtu = ETH_MAX_MTU;

		netdev->netdev_ops = &cxgb4vf_netdev_ops;
		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
		netdev->dev_port = pi->port_id;

		/*
		 * If we haven't been able to contact the firmware, there's
		 * nothing else we can do for this "port" ...
		 */
		if (!(adapter->flags & CXGB4VF_FW_OK))
			continue;

		viid = t4vf_alloc_vi(adapter, port_id);
		if (viid < 0) {
			dev_err(&pdev->dev,
				"cannot allocate VI for port %d: err=%d\n",
				port_id, viid);
			err = viid;
			goto err_free_dev;
		}
		pi->viid = viid;

		/*
		 * Initialize the hardware/software state for the port.
		 */
		err = t4vf_port_init(adapter, pidx);
		if (err) {
			dev_err(&pdev->dev, "cannot initialize port %d\n",
				pidx);
			goto err_free_dev;
		}

		err = t4vf_get_vf_mac_acl(adapter, pf, &naddr, mac);
		if (err) {
			dev_err(&pdev->dev,
				"unable to determine MAC ACL address, "
				"continuing anyway.. (status %d)\n", err);
		} else if (naddr && adapter->params.vfres.nvi == 1) {
			struct sockaddr addr;

			ether_addr_copy(addr.sa_data, mac);
			err = cxgb4vf_set_mac_addr(netdev, &addr);
			if (err) {
				dev_err(&pdev->dev,
					"unable to set MAC address %pM\n",
					mac);
				goto err_free_dev;
			}
			dev_info(&pdev->dev,
				 "Using assigned MAC ACL: %pM\n", mac);
		}
	}

	/* See what interrupts we'll be using.  If we've been configured to
	 * use MSI-X interrupts, try to enable them but fall back to using
	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
	 * get MSI interrupts we bail with the error.
	 */
	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
		adapter->flags |= CXGB4VF_USING_MSIX;
	else {
		if (msi == MSI_MSIX) {
			dev_info(adapter->pdev_dev,
				 "Unable to use MSI-X Interrupts; falling "
				 "back to MSI Interrupts\n");

			/* We're going to need a Forwarded Interrupt Queue so
			 * that may cut into how many Queue Sets we can
			 * support.
			 */
			msi = MSI_MSI;
			size_nports_qsets(adapter);
		}
		err = pci_enable_msi(pdev);
		if (err) {
			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
				" err=%d\n", err);
			goto err_free_dev;
		}
		adapter->flags |= CXGB4VF_USING_MSI;
	}

	/* Now that we know how many "ports" we have and what interrupt
	 * mechanism we're going to use, we can configure our queue resources.
	 */
	cfg_queues(adapter);

	/*
	 * The "card" is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole "card" but rather proceed
	 * only with the ports we manage to register successfully.  However we
	 * must register at least one net device.
	 */
	for_each_port(adapter, pidx) {
		struct port_info *pi = netdev_priv(adapter->port[pidx]);
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;

		netif_set_real_num_tx_queues(netdev, pi->nqsets);
		netif_set_real_num_rx_queues(netdev, pi->nqsets);

		err = register_netdev(netdev);
		if (err) {
			dev_warn(&pdev->dev, "cannot register net device %s,"
				 " skipping\n", netdev->name);
			continue;
		}

		netif_carrier_off(netdev);
		set_bit(pidx, &adapter->registered_device_map);
	}
	if (adapter->registered_device_map == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto err_disable_interrupts;
	}

	/*
	 * Set up our debugfs entries.
	 */
	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
		adapter->debugfs_root =
			debugfs_create_dir(pci_name(pdev),
					   cxgb4vf_debugfs_root);
		setup_debugfs(adapter);
	}

	/*
	 * Print a short notice on the existence and configuration of the new
	 * VF network device ...
	 */
	for_each_port(adapter, pidx) {
		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
			 adapter->port[pidx]->name,
			 (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" :
			 (adapter->flags & CXGB4VF_USING_MSI)  ? "MSI" : "");
	}

	/*
	 * Return success!
	 */
	return 0;

	/*
	 * Error recovery and exit code.  Unwind state that's been created
	 * so far and return the error.
	 */
err_disable_interrupts:
	if (adapter->flags & CXGB4VF_USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~CXGB4VF_USING_MSIX;
	} else if (adapter->flags & CXGB4VF_USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~CXGB4VF_USING_MSI;
	}

err_free_dev:
	for_each_port(adapter, pidx) {
		netdev = adapter->port[pidx];
		if (netdev == NULL)
			continue;
		pi = netdev_priv(netdev);
		if (pi->viid)
			t4vf_free_vi(adapter, pi->viid);
		if (test_bit(pidx, &adapter->registered_device_map))
			unregister_netdev(netdev);
		free_netdev(netdev);
	}

	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);

err_unmap_bar0:
	iounmap(adapter->regs);

err_free_adapter:
	kfree(adapter->mbox_log);
	kfree(adapter);

err_release_regions:
	pci_release_regions(pdev);
	pci_clear_master(pdev);

err_disable_device:
	pci_disable_device(pdev);

	return err;
}
3325 * "Remove" a device: tear down all kernel and driver state created in the
3326 * "probe" routine and quiesce the device (disable interrupts, etc.). (Note
3327 * that this is called "remove_one" in the PF Driver.)
3329 static void cxgb4vf_pci_remove(struct pci_dev
*pdev
)
3331 struct adapter
*adapter
= pci_get_drvdata(pdev
);
3332 struct hash_mac_addr
*entry
, *tmp
;
3335 * Tear down driver state associated with device.
3341 * Stop all of our activity. Unregister network port,
3342 * disable interrupts, etc.
3344 for_each_port(adapter
, pidx
)
3345 if (test_bit(pidx
, &adapter
->registered_device_map
))
3346 unregister_netdev(adapter
->port
[pidx
]);
3347 t4vf_sge_stop(adapter
);
3348 if (adapter
->flags
& CXGB4VF_USING_MSIX
) {
3349 pci_disable_msix(adapter
->pdev
);
3350 adapter
->flags
&= ~CXGB4VF_USING_MSIX
;
3351 } else if (adapter
->flags
& CXGB4VF_USING_MSI
) {
3352 pci_disable_msi(adapter
->pdev
);
3353 adapter
->flags
&= ~CXGB4VF_USING_MSI
;
3357 * Tear down our debugfs entries.
3359 if (!IS_ERR_OR_NULL(adapter
->debugfs_root
)) {
3360 cleanup_debugfs(adapter
);
3361 debugfs_remove_recursive(adapter
->debugfs_root
);
3365 * Free all of the various resources which we've acquired ...
3367 t4vf_free_sge_resources(adapter
);
3368 for_each_port(adapter
, pidx
) {
3369 struct net_device
*netdev
= adapter
->port
[pidx
];
3370 struct port_info
*pi
;
3375 pi
= netdev_priv(netdev
);
3377 t4vf_free_vi(adapter
, pi
->viid
);
3378 free_netdev(netdev
);
3380 iounmap(adapter
->regs
);
3381 if (!is_t4(adapter
->params
.chip
))
3382 iounmap(adapter
->bar2
);
3383 kfree(adapter
->mbox_log
);
3384 list_for_each_entry_safe(entry
, tmp
, &adapter
->mac_hlist
,
3386 list_del(&entry
->list
);
3393 * Disable the device and release its PCI resources.
3395 pci_disable_device(pdev
);
3396 pci_clear_master(pdev
);
3397 pci_release_regions(pdev
);
3401 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
3404 static void cxgb4vf_pci_shutdown(struct pci_dev
*pdev
)
3406 struct adapter
*adapter
;
3409 adapter
= pci_get_drvdata(pdev
);
3413 /* Disable all Virtual Interfaces. This will shut down the
3414 * delivery of all ingress packets into the chip for these
3415 * Virtual Interfaces.
3417 for_each_port(adapter
, pidx
)
3418 if (test_bit(pidx
, &adapter
->registered_device_map
))
3419 unregister_netdev(adapter
->port
[pidx
]);
3421 /* Free up all Queues which will prevent further DMA and
3422 * Interrupts allowing various internal pathways to drain.
3424 t4vf_sge_stop(adapter
);
3425 if (adapter
->flags
& CXGB4VF_USING_MSIX
) {
3426 pci_disable_msix(adapter
->pdev
);
3427 adapter
->flags
&= ~CXGB4VF_USING_MSIX
;
3428 } else if (adapter
->flags
& CXGB4VF_USING_MSI
) {
3429 pci_disable_msi(adapter
->pdev
);
3430 adapter
->flags
&= ~CXGB4VF_USING_MSI
;
3434 * Free up all Queues which will prevent further DMA and
3435 * Interrupts allowing various internal pathways to drain.
3437 t4vf_free_sge_resources(adapter
);
3438 pci_set_drvdata(pdev
, NULL
);
/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION	0x8

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }

#include "../cxgb4/t4_pci_id_tbl.h"
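
/*
 * With the macros above, each CH_PCI_ID_TABLE_ENTRY(devid) pulled in from
 * t4_pci_id_tbl.h expands to an initializer along the lines of
 * { PCI_VDEVICE(CHELSIO, (devid)), 0 }, so cxgb4vf_pci_tbl becomes an
 * ordinary pci_device_id array terminated by { 0, }.
 */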
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);

static struct pci_driver cxgb4vf_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= cxgb4vf_pci_tbl,
	.probe		= cxgb4vf_pci_probe,
	.remove		= cxgb4vf_pci_remove,
	.shutdown	= cxgb4vf_pci_shutdown,
};
/*
 * Initialize global driver state.
 */
static int __init cxgb4vf_module_init(void)
{
	int ret;

	/*
	 * Vet our module parameters.
	 */
	if (msi != MSI_MSIX && msi != MSI_MSI) {
		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
			msi, MSI_MSIX, MSI_MSI);
		return -EINVAL;
	}

	/* Debugfs support is optional, debugfs will warn if this fails */
	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);

	ret = pci_register_driver(&cxgb4vf_driver);
	if (ret < 0)
		debugfs_remove(cxgb4vf_debugfs_root);
	return ret;
}

/*
 * Tear down global driver state.
 */
static void __exit cxgb4vf_module_exit(void)
{
	pci_unregister_driver(&cxgb4vf_driver);
	debugfs_remove(cxgb4vf_debugfs_root);
}

module_init(cxgb4vf_module_init);
module_exit(cxgb4vf_module_exit);