/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>
#define DRV_VERSION "1.0.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U
enum {
	MEMWIN0_APERTURE = 65536,
	MEMWIN0_BASE     = 0x30000,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN2_APERTURE = 2048,
	MEMWIN2_BASE     = 0x1b800,
};
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000),  /* PE10K */
	{ 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
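/*
 * A minimal sketch of how the "msi" parameter above is typically read as a
 * fallback order.  The enum and helper are illustrative only (not driver
 * API); the real selection happens in the probe path based on what
 * pci_enable_msix()/pci_enable_msi() report.
 */
enum irq_scheme_example { EX_INTX, EX_MSI, EX_MSIX };

static inline enum irq_scheme_example example_pick_irq(int msi_param,
						       int msix_ok, int msi_ok)
{
	if (msi_param >= 2 && msix_ok)
		return EX_MSIX;		/* msi = 2: all three allowed */
	if (msi_param >= 1 && msi_ok)
		return EX_MSI;		/* msi = 1: MSI or INTx only */
	return EX_INTX;			/* msi = 0: force INTx */
}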
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of
 * these values upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		/* remaining link speeds map to their name strings here */
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
			    pi->vlan_grp != NULL, true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, 0, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
	return ret;
}
332 * Response queue handler for the FW event queue.
334 static int fwevtq_handler(struct sge_rspq
*q
, const __be64
*rsp
,
335 const struct pkt_gl
*gl
)
337 u8 opcode
= ((const struct rss_header
*)rsp
)->opcode
;
339 rsp
++; /* skip RSS header */
340 if (likely(opcode
== CPL_SGE_EGR_UPDATE
)) {
341 const struct cpl_sge_egr_update
*p
= (void *)rsp
;
342 unsigned int qid
= EGR_QID(ntohl(p
->opcode_qid
));
343 struct sge_txq
*txq
= q
->adap
->sge
.egr_map
[qid
];
346 if ((u8
*)txq
< (u8
*)q
->adap
->sge
.ethrxq
) {
347 struct sge_eth_txq
*eq
;
349 eq
= container_of(txq
, struct sge_eth_txq
, q
);
350 netif_tx_wake_queue(eq
->txq
);
352 struct sge_ofld_txq
*oq
;
354 oq
= container_of(txq
, struct sge_ofld_txq
, q
);
355 tasklet_schedule(&oq
->qresume_tsk
);
357 } else if (opcode
== CPL_FW6_MSG
|| opcode
== CPL_FW4_MSG
) {
358 const struct cpl_fw6_msg
*p
= (void *)rsp
;
361 t4_handle_fw_rpl(q
->adap
, p
->data
);
362 } else if (opcode
== CPL_L2T_WRITE_RPL
) {
363 const struct cpl_l2t_write_rpl
*p
= (void *)rsp
;
365 do_l2t_write_rpl(q
->adap
, p
);
367 dev_err(q
->adap
->pdev_dev
,
368 "unexpected CPL %#x on FW event queue\n", opcode
);
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
381 static int uldrx_handler(struct sge_rspq
*q
, const __be64
*rsp
,
382 const struct pkt_gl
*gl
)
384 struct sge_ofld_rxq
*rxq
= container_of(q
, struct sge_ofld_rxq
, rspq
);
386 if (ulds
[q
->uld
].rx_handler(q
->adap
->uld_handle
[q
->uld
], rsp
, gl
)) {
392 else if (gl
== CXGB4_MSG_AN
)
399 static void disable_msi(struct adapter
*adapter
)
401 if (adapter
->flags
& USING_MSIX
) {
402 pci_disable_msix(adapter
->pdev
);
403 adapter
->flags
&= ~USING_MSIX
;
404 } else if (adapter
->flags
& USING_MSI
) {
405 pci_disable_msi(adapter
->pdev
);
406 adapter
->flags
&= ~USING_MSI
;
411 * Interrupt handler for non-data events used with MSI-X.
413 static irqreturn_t
t4_nondata_intr(int irq
, void *cookie
)
415 struct adapter
*adap
= cookie
;
417 u32 v
= t4_read_reg(adap
, MYPF_REG(PL_PF_INT_CAUSE
));
420 t4_write_reg(adap
, MYPF_REG(PL_PF_INT_CAUSE
), v
);
422 t4_slow_intr_handler(adap
);
427 * Name the MSI-X interrupts.
429 static void name_msix_vecs(struct adapter
*adap
)
431 int i
, j
, msi_idx
= 2, n
= sizeof(adap
->msix_info
[0].desc
) - 1;
433 /* non-data interrupts */
434 snprintf(adap
->msix_info
[0].desc
, n
, "%s", adap
->name
);
435 adap
->msix_info
[0].desc
[n
] = 0;
438 snprintf(adap
->msix_info
[1].desc
, n
, "%s-FWeventq", adap
->name
);
439 adap
->msix_info
[1].desc
[n
] = 0;
441 /* Ethernet queues */
442 for_each_port(adap
, j
) {
443 struct net_device
*d
= adap
->port
[j
];
444 const struct port_info
*pi
= netdev_priv(d
);
446 for (i
= 0; i
< pi
->nqsets
; i
++, msi_idx
++) {
447 snprintf(adap
->msix_info
[msi_idx
].desc
, n
, "%s-Rx%d",
449 adap
->msix_info
[msi_idx
].desc
[n
] = 0;
454 for_each_ofldrxq(&adap
->sge
, i
) {
455 snprintf(adap
->msix_info
[msi_idx
].desc
, n
, "%s-ofld%d",
457 adap
->msix_info
[msi_idx
++].desc
[n
] = 0;
459 for_each_rdmarxq(&adap
->sge
, i
) {
460 snprintf(adap
->msix_info
[msi_idx
].desc
, n
, "%s-rdma%d",
462 adap
->msix_info
[msi_idx
++].desc
[n
] = 0;
466 static int request_msix_queue_irqs(struct adapter
*adap
)
468 struct sge
*s
= &adap
->sge
;
469 int err
, ethqidx
, ofldqidx
= 0, rdmaqidx
= 0, msi
= 2;
471 err
= request_irq(adap
->msix_info
[1].vec
, t4_sge_intr_msix
, 0,
472 adap
->msix_info
[1].desc
, &s
->fw_evtq
);
476 for_each_ethrxq(s
, ethqidx
) {
477 err
= request_irq(adap
->msix_info
[msi
].vec
, t4_sge_intr_msix
, 0,
478 adap
->msix_info
[msi
].desc
,
479 &s
->ethrxq
[ethqidx
].rspq
);
484 for_each_ofldrxq(s
, ofldqidx
) {
485 err
= request_irq(adap
->msix_info
[msi
].vec
, t4_sge_intr_msix
, 0,
486 adap
->msix_info
[msi
].desc
,
487 &s
->ofldrxq
[ofldqidx
].rspq
);
492 for_each_rdmarxq(s
, rdmaqidx
) {
493 err
= request_irq(adap
->msix_info
[msi
].vec
, t4_sge_intr_msix
, 0,
494 adap
->msix_info
[msi
].desc
,
495 &s
->rdmarxq
[rdmaqidx
].rspq
);
503 while (--rdmaqidx
>= 0)
504 free_irq(adap
->msix_info
[--msi
].vec
,
505 &s
->rdmarxq
[rdmaqidx
].rspq
);
506 while (--ofldqidx
>= 0)
507 free_irq(adap
->msix_info
[--msi
].vec
,
508 &s
->ofldrxq
[ofldqidx
].rspq
);
509 while (--ethqidx
>= 0)
510 free_irq(adap
->msix_info
[--msi
].vec
, &s
->ethrxq
[ethqidx
].rspq
);
511 free_irq(adap
->msix_info
[1].vec
, &s
->fw_evtq
);
515 static void free_msix_queue_irqs(struct adapter
*adap
)
518 struct sge
*s
= &adap
->sge
;
520 free_irq(adap
->msix_info
[1].vec
, &s
->fw_evtq
);
521 for_each_ethrxq(s
, i
)
522 free_irq(adap
->msix_info
[msi
++].vec
, &s
->ethrxq
[i
].rspq
);
523 for_each_ofldrxq(s
, i
)
524 free_irq(adap
->msix_info
[msi
++].vec
, &s
->ofldrxq
[i
].rspq
);
525 for_each_rdmarxq(s
, i
)
526 free_irq(adap
->msix_info
[msi
++].vec
, &s
->rdmarxq
[i
].rspq
);
/*
 *	setup_rss - configure RSS
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for all ports since the mapping
 *	table has plenty of entries.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;
	u16 rss[MAX_ETH_QSETS];

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);
		const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++)
			rss[j] = q[j].rspq.abs_id;

		err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
					  rss, pi->nqsets);
		if (err)
			return err;
	}
	return 0;
}
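/*
 * A minimal sketch of what the RSS setup above achieves, reduced to plain C:
 * an indirection table whose entries cycle through the port's response queue
 * ids, so hashed flows spread across all configured queues.  The helper and
 * its names are illustrative only, not driver API.
 */
static inline void example_fill_rss_table(unsigned short *table,
					  unsigned int table_len,
					  const unsigned short *queue_ids,
					  unsigned int nqsets)
{
	unsigned int i;

	for (i = 0; i < table_len; i++)
		table[i] = queue_ids[i % nqsets];	/* round-robin spread */
}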
561 * Wait until all NAPI handlers are descheduled.
563 static void quiesce_rx(struct adapter
*adap
)
567 for (i
= 0; i
< ARRAY_SIZE(adap
->sge
.ingr_map
); i
++) {
568 struct sge_rspq
*q
= adap
->sge
.ingr_map
[i
];
571 napi_disable(&q
->napi
);
576 * Enable NAPI scheduling and interrupt generation for all Rx queues.
578 static void enable_rx(struct adapter
*adap
)
582 for (i
= 0; i
< ARRAY_SIZE(adap
->sge
.ingr_map
); i
++) {
583 struct sge_rspq
*q
= adap
->sge
.ingr_map
[i
];
588 napi_enable(&q
->napi
);
589 /* 0-increment GTS to start the timer and enable interrupts */
590 t4_write_reg(adap
, MYPF_REG(SGE_PF_GTS
),
591 SEINTARM(q
->intr_params
) |
592 INGRESSQID(q
->cntxt_id
));
/*
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
604 static int setup_sge_queues(struct adapter
*adap
)
606 int err
, msi_idx
, i
, j
;
607 struct sge
*s
= &adap
->sge
;
609 bitmap_zero(s
->starving_fl
, MAX_EGRQ
);
610 bitmap_zero(s
->txq_maperr
, MAX_EGRQ
);
612 if (adap
->flags
& USING_MSIX
)
613 msi_idx
= 1; /* vector 0 is for non-queue interrupts */
615 err
= t4_sge_alloc_rxq(adap
, &s
->intrq
, false, adap
->port
[0], 0,
619 msi_idx
= -((int)s
->intrq
.abs_id
+ 1);
622 err
= t4_sge_alloc_rxq(adap
, &s
->fw_evtq
, true, adap
->port
[0],
623 msi_idx
, NULL
, fwevtq_handler
);
625 freeout
: t4_free_sge_resources(adap
);
629 for_each_port(adap
, i
) {
630 struct net_device
*dev
= adap
->port
[i
];
631 struct port_info
*pi
= netdev_priv(dev
);
632 struct sge_eth_rxq
*q
= &s
->ethrxq
[pi
->first_qset
];
633 struct sge_eth_txq
*t
= &s
->ethtxq
[pi
->first_qset
];
635 for (j
= 0; j
< pi
->nqsets
; j
++, q
++) {
638 err
= t4_sge_alloc_rxq(adap
, &q
->rspq
, false, dev
,
644 memset(&q
->stats
, 0, sizeof(q
->stats
));
646 for (j
= 0; j
< pi
->nqsets
; j
++, t
++) {
647 err
= t4_sge_alloc_eth_txq(adap
, t
, dev
,
648 netdev_get_tx_queue(dev
, j
),
649 s
->fw_evtq
.cntxt_id
);
655 j
= s
->ofldqsets
/ adap
->params
.nports
; /* ofld queues per channel */
656 for_each_ofldrxq(s
, i
) {
657 struct sge_ofld_rxq
*q
= &s
->ofldrxq
[i
];
658 struct net_device
*dev
= adap
->port
[i
/ j
];
662 err
= t4_sge_alloc_rxq(adap
, &q
->rspq
, false, dev
, msi_idx
,
663 &q
->fl
, uldrx_handler
);
666 memset(&q
->stats
, 0, sizeof(q
->stats
));
667 s
->ofld_rxq
[i
] = q
->rspq
.abs_id
;
668 err
= t4_sge_alloc_ofld_txq(adap
, &s
->ofldtxq
[i
], dev
,
669 s
->fw_evtq
.cntxt_id
);
674 for_each_rdmarxq(s
, i
) {
675 struct sge_ofld_rxq
*q
= &s
->rdmarxq
[i
];
679 err
= t4_sge_alloc_rxq(adap
, &q
->rspq
, false, adap
->port
[i
],
680 msi_idx
, &q
->fl
, uldrx_handler
);
683 memset(&q
->stats
, 0, sizeof(q
->stats
));
684 s
->rdma_rxq
[i
] = q
->rspq
.abs_id
;
687 for_each_port(adap
, i
) {
689 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
690 * have RDMA queues, and that's the right value.
692 err
= t4_sge_alloc_ctrl_txq(adap
, &s
->ctrlq
[i
], adap
->port
[i
],
694 s
->rdmarxq
[i
].rspq
.cntxt_id
);
699 t4_write_reg(adap
, MPS_TRC_RSS_CONTROL
,
700 RSSCONTROL(netdev2pinfo(adap
->port
[0])->tx_chan
) |
701 QUEUENUMBER(s
->ethrxq
[0].rspq
.abs_id
));
/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
709 static int upgrade_fw(struct adapter
*adap
)
713 const struct fw_hdr
*hdr
;
714 const struct firmware
*fw
;
715 struct device
*dev
= adap
->pdev_dev
;
717 ret
= request_firmware(&fw
, FW_FNAME
, dev
);
719 dev_err(dev
, "unable to load firmware image " FW_FNAME
720 ", error %d\n", ret
);
724 hdr
= (const struct fw_hdr
*)fw
->data
;
725 vers
= ntohl(hdr
->fw_ver
);
726 if (FW_HDR_FW_VER_MAJOR_GET(vers
) != FW_VERSION_MAJOR
) {
727 ret
= -EINVAL
; /* wrong major version, won't do */
732 * If the flash FW is unusable or we found something newer, load it.
734 if (FW_HDR_FW_VER_MAJOR_GET(adap
->params
.fw_vers
) != FW_VERSION_MAJOR
||
735 vers
> adap
->params
.fw_vers
) {
736 ret
= -t4_load_fw(adap
, fw
->data
, fw
->size
);
738 dev_info(dev
, "firmware upgraded to version %pI4 from "
739 FW_FNAME
"\n", &hdr
->fw_ver
);
741 out
: release_firmware(fw
);
746 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
747 * The allocated memory is cleared.
749 void *t4_alloc_mem(size_t size
)
751 void *p
= kmalloc(size
, GFP_KERNEL
);
761 * Free memory allocated through alloc_mem().
763 void t4_free_mem(void *addr
)
765 if (is_vmalloc_addr(addr
))
771 static inline int is_offload(const struct adapter
*adap
)
773 return adap
->params
.offload
;
777 * Implementation of ethtool operations.
780 static u32
get_msglevel(struct net_device
*dev
)
782 return netdev2adap(dev
)->msg_enable
;
785 static void set_msglevel(struct net_device
*dev
, u32 val
)
787 netdev2adap(dev
)->msg_enable
= val
;
790 static char stats_strings
[][ETH_GSTRING_LEN
] = {
793 "TxBroadcastFrames ",
794 "TxMulticastFrames ",
802 "TxFrames512To1023 ",
803 "TxFrames1024To1518 ",
804 "TxFrames1519ToMax ",
819 "RxBroadcastFrames ",
820 "RxMulticastFrames ",
834 "RxFrames512To1023 ",
835 "RxFrames1024To1518 ",
836 "RxFrames1519ToMax ",
848 "RxBG0FramesDropped ",
849 "RxBG1FramesDropped ",
850 "RxBG2FramesDropped ",
851 "RxBG3FramesDropped ",
866 static int get_sset_count(struct net_device
*dev
, int sset
)
870 return ARRAY_SIZE(stats_strings
);
876 #define T4_REGMAP_SIZE (160 * 1024)
878 static int get_regs_len(struct net_device
*dev
)
880 return T4_REGMAP_SIZE
;
883 static int get_eeprom_len(struct net_device
*dev
)
888 static void get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
890 struct adapter
*adapter
= netdev2adap(dev
);
892 strcpy(info
->driver
, KBUILD_MODNAME
);
893 strcpy(info
->version
, DRV_VERSION
);
894 strcpy(info
->bus_info
, pci_name(adapter
->pdev
));
896 if (!adapter
->params
.fw_vers
)
897 strcpy(info
->fw_version
, "N/A");
899 snprintf(info
->fw_version
, sizeof(info
->fw_version
),
900 "%u.%u.%u.%u, TP %u.%u.%u.%u",
901 FW_HDR_FW_VER_MAJOR_GET(adapter
->params
.fw_vers
),
902 FW_HDR_FW_VER_MINOR_GET(adapter
->params
.fw_vers
),
903 FW_HDR_FW_VER_MICRO_GET(adapter
->params
.fw_vers
),
904 FW_HDR_FW_VER_BUILD_GET(adapter
->params
.fw_vers
),
905 FW_HDR_FW_VER_MAJOR_GET(adapter
->params
.tp_vers
),
906 FW_HDR_FW_VER_MINOR_GET(adapter
->params
.tp_vers
),
907 FW_HDR_FW_VER_MICRO_GET(adapter
->params
.tp_vers
),
908 FW_HDR_FW_VER_BUILD_GET(adapter
->params
.tp_vers
));
911 static void get_strings(struct net_device
*dev
, u32 stringset
, u8
*data
)
913 if (stringset
== ETH_SS_STATS
)
914 memcpy(data
, stats_strings
, sizeof(stats_strings
));
918 * port stats maintained per queue of the port. They should be in the same
919 * order as in stats_strings above.
921 struct queue_port_stats
{
931 static void collect_sge_port_stats(const struct adapter
*adap
,
932 const struct port_info
*p
, struct queue_port_stats
*s
)
935 const struct sge_eth_txq
*tx
= &adap
->sge
.ethtxq
[p
->first_qset
];
936 const struct sge_eth_rxq
*rx
= &adap
->sge
.ethrxq
[p
->first_qset
];
938 memset(s
, 0, sizeof(*s
));
939 for (i
= 0; i
< p
->nqsets
; i
++, rx
++, tx
++) {
941 s
->tx_csum
+= tx
->tx_cso
;
942 s
->rx_csum
+= rx
->stats
.rx_cso
;
943 s
->vlan_ex
+= rx
->stats
.vlan_ex
;
944 s
->vlan_ins
+= tx
->vlan_ins
;
945 s
->gro_pkts
+= rx
->stats
.lro_pkts
;
946 s
->gro_merged
+= rx
->stats
.lro_merged
;
950 static void get_stats(struct net_device
*dev
, struct ethtool_stats
*stats
,
953 struct port_info
*pi
= netdev_priv(dev
);
954 struct adapter
*adapter
= pi
->adapter
;
956 t4_get_port_stats(adapter
, pi
->tx_chan
, (struct port_stats
*)data
);
958 data
+= sizeof(struct port_stats
) / sizeof(u64
);
959 collect_sge_port_stats(adapter
, pi
, (struct queue_port_stats
*)data
);
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return 4 | (ap->params.rev << 10);
}
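/*
 * A minimal sketch decoding the version word built by mk_adap_vers() above,
 * following the bit layout in its comment.  These helpers are illustrative
 * only and are not used by the driver.
 */
static inline unsigned int example_adap_vers_chip(unsigned int vers)
{
	return vers & 0x3ff;		/* bits 0..9: chip version */
}

static inline unsigned int example_adap_vers_rev(unsigned int vers)
{
	return (vers >> 10) & 0x3f;	/* bits 10..15: chip revision */
}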
972 static void reg_block_dump(struct adapter
*ap
, void *buf
, unsigned int start
,
975 u32
*p
= buf
+ start
;
977 for ( ; start
<= end
; start
+= sizeof(u32
))
978 *p
++ = t4_read_reg(ap
, start
);
981 static void get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
984 static const unsigned int reg_ranges
[] = {
1203 struct adapter
*ap
= netdev2adap(dev
);
1205 regs
->version
= mk_adap_vers(ap
);
1207 memset(buf
, 0, T4_REGMAP_SIZE
);
1208 for (i
= 0; i
< ARRAY_SIZE(reg_ranges
); i
+= 2)
1209 reg_block_dump(ap
, buf
, reg_ranges
[i
], reg_ranges
[i
+ 1]);
1212 static int restart_autoneg(struct net_device
*dev
)
1214 struct port_info
*p
= netdev_priv(dev
);
1216 if (!netif_running(dev
))
1218 if (p
->link_cfg
.autoneg
!= AUTONEG_ENABLE
)
1220 t4_restart_aneg(p
->adapter
, 0, p
->tx_chan
);
1224 static int identify_port(struct net_device
*dev
, u32 data
)
1227 data
= 2; /* default to 2 seconds */
1229 return t4_identify_port(netdev2adap(dev
), 0, netdev2pinfo(dev
)->viid
,
1233 static unsigned int from_fw_linkcaps(unsigned int type
, unsigned int caps
)
1237 if (type
== FW_PORT_TYPE_BT_SGMII
|| type
== FW_PORT_TYPE_BT_XAUI
) {
1239 if (caps
& FW_PORT_CAP_SPEED_100M
)
1240 v
|= SUPPORTED_100baseT_Full
;
1241 if (caps
& FW_PORT_CAP_SPEED_1G
)
1242 v
|= SUPPORTED_1000baseT_Full
;
1243 if (caps
& FW_PORT_CAP_SPEED_10G
)
1244 v
|= SUPPORTED_10000baseT_Full
;
1245 } else if (type
== FW_PORT_TYPE_KX4
|| type
== FW_PORT_TYPE_KX
) {
1246 v
|= SUPPORTED_Backplane
;
1247 if (caps
& FW_PORT_CAP_SPEED_1G
)
1248 v
|= SUPPORTED_1000baseKX_Full
;
1249 if (caps
& FW_PORT_CAP_SPEED_10G
)
1250 v
|= SUPPORTED_10000baseKX4_Full
;
1251 } else if (type
== FW_PORT_TYPE_KR
)
1252 v
|= SUPPORTED_Backplane
| SUPPORTED_10000baseKR_Full
;
1253 else if (type
== FW_PORT_TYPE_FIBER
)
1254 v
|= SUPPORTED_FIBRE
;
1256 if (caps
& FW_PORT_CAP_ANEG
)
1257 v
|= SUPPORTED_Autoneg
;
1261 static unsigned int to_fw_linkcaps(unsigned int caps
)
1265 if (caps
& ADVERTISED_100baseT_Full
)
1266 v
|= FW_PORT_CAP_SPEED_100M
;
1267 if (caps
& ADVERTISED_1000baseT_Full
)
1268 v
|= FW_PORT_CAP_SPEED_1G
;
1269 if (caps
& ADVERTISED_10000baseT_Full
)
1270 v
|= FW_PORT_CAP_SPEED_10G
;
1274 static int get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1276 const struct port_info
*p
= netdev_priv(dev
);
1278 if (p
->port_type
== FW_PORT_TYPE_BT_SGMII
||
1279 p
->port_type
== FW_PORT_TYPE_BT_XAUI
)
1280 cmd
->port
= PORT_TP
;
1281 else if (p
->port_type
== FW_PORT_TYPE_FIBER
)
1282 cmd
->port
= PORT_FIBRE
;
1283 else if (p
->port_type
== FW_PORT_TYPE_TWINAX
)
1284 cmd
->port
= PORT_DA
;
1286 cmd
->port
= PORT_OTHER
;
1288 if (p
->mdio_addr
>= 0) {
1289 cmd
->phy_address
= p
->mdio_addr
;
1290 cmd
->transceiver
= XCVR_EXTERNAL
;
1291 cmd
->mdio_support
= p
->port_type
== FW_PORT_TYPE_BT_SGMII
?
1292 MDIO_SUPPORTS_C22
: MDIO_SUPPORTS_C45
;
1294 cmd
->phy_address
= 0; /* not really, but no better option */
1295 cmd
->transceiver
= XCVR_INTERNAL
;
1296 cmd
->mdio_support
= 0;
1299 cmd
->supported
= from_fw_linkcaps(p
->port_type
, p
->link_cfg
.supported
);
1300 cmd
->advertising
= from_fw_linkcaps(p
->port_type
,
1301 p
->link_cfg
.advertising
);
1302 cmd
->speed
= netif_carrier_ok(dev
) ? p
->link_cfg
.speed
: 0;
1303 cmd
->duplex
= DUPLEX_FULL
;
1304 cmd
->autoneg
= p
->link_cfg
.autoneg
;
1310 static unsigned int speed_to_caps(int speed
)
1312 if (speed
== SPEED_100
)
1313 return FW_PORT_CAP_SPEED_100M
;
1314 if (speed
== SPEED_1000
)
1315 return FW_PORT_CAP_SPEED_1G
;
1316 if (speed
== SPEED_10000
)
1317 return FW_PORT_CAP_SPEED_10G
;
1321 static int set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1324 struct port_info
*p
= netdev_priv(dev
);
1325 struct link_config
*lc
= &p
->link_cfg
;
1327 if (cmd
->duplex
!= DUPLEX_FULL
) /* only full-duplex supported */
1330 if (!(lc
->supported
& FW_PORT_CAP_ANEG
)) {
1332 * PHY offers a single speed. See if that's what's
1335 if (cmd
->autoneg
== AUTONEG_DISABLE
&&
1336 (lc
->supported
& speed_to_caps(cmd
->speed
)))
1341 if (cmd
->autoneg
== AUTONEG_DISABLE
) {
1342 cap
= speed_to_caps(cmd
->speed
);
1344 if (!(lc
->supported
& cap
) || cmd
->speed
== SPEED_1000
||
1345 cmd
->speed
== SPEED_10000
)
1347 lc
->requested_speed
= cap
;
1348 lc
->advertising
= 0;
1350 cap
= to_fw_linkcaps(cmd
->advertising
);
1351 if (!(lc
->supported
& cap
))
1353 lc
->requested_speed
= 0;
1354 lc
->advertising
= cap
| FW_PORT_CAP_ANEG
;
1356 lc
->autoneg
= cmd
->autoneg
;
1358 if (netif_running(dev
))
1359 return t4_link_start(p
->adapter
, 0, p
->tx_chan
, lc
);
1363 static void get_pauseparam(struct net_device
*dev
,
1364 struct ethtool_pauseparam
*epause
)
1366 struct port_info
*p
= netdev_priv(dev
);
1368 epause
->autoneg
= (p
->link_cfg
.requested_fc
& PAUSE_AUTONEG
) != 0;
1369 epause
->rx_pause
= (p
->link_cfg
.fc
& PAUSE_RX
) != 0;
1370 epause
->tx_pause
= (p
->link_cfg
.fc
& PAUSE_TX
) != 0;
1373 static int set_pauseparam(struct net_device
*dev
,
1374 struct ethtool_pauseparam
*epause
)
1376 struct port_info
*p
= netdev_priv(dev
);
1377 struct link_config
*lc
= &p
->link_cfg
;
1379 if (epause
->autoneg
== AUTONEG_DISABLE
)
1380 lc
->requested_fc
= 0;
1381 else if (lc
->supported
& FW_PORT_CAP_ANEG
)
1382 lc
->requested_fc
= PAUSE_AUTONEG
;
1386 if (epause
->rx_pause
)
1387 lc
->requested_fc
|= PAUSE_RX
;
1388 if (epause
->tx_pause
)
1389 lc
->requested_fc
|= PAUSE_TX
;
1390 if (netif_running(dev
))
1391 return t4_link_start(p
->adapter
, 0, p
->tx_chan
, lc
);
1395 static u32
get_rx_csum(struct net_device
*dev
)
1397 struct port_info
*p
= netdev_priv(dev
);
1399 return p
->rx_offload
& RX_CSO
;
1402 static int set_rx_csum(struct net_device
*dev
, u32 data
)
1404 struct port_info
*p
= netdev_priv(dev
);
1407 p
->rx_offload
|= RX_CSO
;
1409 p
->rx_offload
&= ~RX_CSO
;
1413 static void get_sge_param(struct net_device
*dev
, struct ethtool_ringparam
*e
)
1415 const struct port_info
*pi
= netdev_priv(dev
);
1416 const struct sge
*s
= &pi
->adapter
->sge
;
1418 e
->rx_max_pending
= MAX_RX_BUFFERS
;
1419 e
->rx_mini_max_pending
= MAX_RSPQ_ENTRIES
;
1420 e
->rx_jumbo_max_pending
= 0;
1421 e
->tx_max_pending
= MAX_TXQ_ENTRIES
;
1423 e
->rx_pending
= s
->ethrxq
[pi
->first_qset
].fl
.size
- 8;
1424 e
->rx_mini_pending
= s
->ethrxq
[pi
->first_qset
].rspq
.size
;
1425 e
->rx_jumbo_pending
= 0;
1426 e
->tx_pending
= s
->ethtxq
[pi
->first_qset
].q
.size
;
1429 static int set_sge_param(struct net_device
*dev
, struct ethtool_ringparam
*e
)
1432 const struct port_info
*pi
= netdev_priv(dev
);
1433 struct adapter
*adapter
= pi
->adapter
;
1434 struct sge
*s
= &adapter
->sge
;
1436 if (e
->rx_pending
> MAX_RX_BUFFERS
|| e
->rx_jumbo_pending
||
1437 e
->tx_pending
> MAX_TXQ_ENTRIES
||
1438 e
->rx_mini_pending
> MAX_RSPQ_ENTRIES
||
1439 e
->rx_mini_pending
< MIN_RSPQ_ENTRIES
||
1440 e
->rx_pending
< MIN_FL_ENTRIES
|| e
->tx_pending
< MIN_TXQ_ENTRIES
)
1443 if (adapter
->flags
& FULL_INIT_DONE
)
1446 for (i
= 0; i
< pi
->nqsets
; ++i
) {
1447 s
->ethtxq
[pi
->first_qset
+ i
].q
.size
= e
->tx_pending
;
1448 s
->ethrxq
[pi
->first_qset
+ i
].fl
.size
= e
->rx_pending
+ 8;
1449 s
->ethrxq
[pi
->first_qset
+ i
].rspq
.size
= e
->rx_mini_pending
;
1454 static int closest_timer(const struct sge
*s
, int time
)
1456 int i
, delta
, match
= 0, min_delta
= INT_MAX
;
1458 for (i
= 0; i
< ARRAY_SIZE(s
->timer_val
); i
++) {
1459 delta
= time
- s
->timer_val
[i
];
1462 if (delta
< min_delta
) {
1470 static int closest_thres(const struct sge
*s
, int thres
)
1472 int i
, delta
, match
= 0, min_delta
= INT_MAX
;
1474 for (i
= 0; i
< ARRAY_SIZE(s
->counter_val
); i
++) {
1475 delta
= thres
- s
->counter_val
[i
];
1478 if (delta
< min_delta
) {
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
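/*
 * A minimal sketch of the intr_params encoding used above, assuming the
 * layout implied by qtimer_val(): timer index in bits 1 and up, packet
 * counter enable in bit 0.  Illustrative helpers only, not driver API.
 */
static inline unsigned int example_encode_intr_params(unsigned int timer_idx,
						      int cnt_en)
{
	return (timer_idx << 1) | (cnt_en ? 1 : 0);
}

static inline unsigned int example_intr_params_timer(unsigned int params)
{
	return params >> 1;	/* same shift qtimer_val() uses above */
}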
1535 static int set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*c
)
1537 const struct port_info
*pi
= netdev_priv(dev
);
1538 struct adapter
*adap
= pi
->adapter
;
1540 return set_rxq_intr_params(adap
, &adap
->sge
.ethrxq
[pi
->first_qset
].rspq
,
1541 c
->rx_coalesce_usecs
, c
->rx_max_coalesced_frames
);
1544 static int get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*c
)
1546 const struct port_info
*pi
= netdev_priv(dev
);
1547 const struct adapter
*adap
= pi
->adapter
;
1548 const struct sge_rspq
*rq
= &adap
->sge
.ethrxq
[pi
->first_qset
].rspq
;
1550 c
->rx_coalesce_usecs
= qtimer_val(adap
, rq
);
1551 c
->rx_max_coalesced_frames
= (rq
->intr_params
& QINTR_CNT_EN
) ?
1552 adap
->sge
.counter_val
[rq
->pktcnt_idx
] : 0;
/*
 * Translate a physical EEPROM address to virtual.  The first 1K is accessed
 * through virtual addresses starting at 31K, the rest is accessed through
 * virtual addresses starting at 0.  This mapping is correct only for PF0.
 */
static int eeprom_ptov(unsigned int phys_addr)
{
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024;
	return -EINVAL;
}
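/*
 * Worked examples of the physical->virtual translation above (valid for
 * PF0 only); the addresses are illustrative:
 *
 *	phys 0x000  ->  virt 0x7C00   (first 1K is mapped up at 31K)
 *	phys 0x3FF  ->  virt 0x7FFF
 *	phys 0x400  ->  virt 0x0000   (everything past 1K shifts down by 1K)
 */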
/*
 * The next two routines implement eeprom read/write from physical addresses.
 * The physical->virtual translation is correct only for PF0.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}
1592 #define EEPROM_MAGIC 0x38E2F10C
1594 static int get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*e
,
1598 struct adapter
*adapter
= netdev2adap(dev
);
1600 u8
*buf
= kmalloc(EEPROMSIZE
, GFP_KERNEL
);
1604 e
->magic
= EEPROM_MAGIC
;
1605 for (i
= e
->offset
& ~3; !err
&& i
< e
->offset
+ e
->len
; i
+= 4)
1606 err
= eeprom_rd_phys(adapter
, i
, (u32
*)&buf
[i
]);
1609 memcpy(data
, buf
+ e
->offset
, e
->len
);
1614 static int set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
1619 u32 aligned_offset
, aligned_len
, *p
;
1620 struct adapter
*adapter
= netdev2adap(dev
);
1622 if (eeprom
->magic
!= EEPROM_MAGIC
)
1625 aligned_offset
= eeprom
->offset
& ~3;
1626 aligned_len
= (eeprom
->len
+ (eeprom
->offset
& 3) + 3) & ~3;
1628 if (aligned_offset
!= eeprom
->offset
|| aligned_len
!= eeprom
->len
) {
1630 * RMW possibly needed for first or last words.
1632 buf
= kmalloc(aligned_len
, GFP_KERNEL
);
1635 err
= eeprom_rd_phys(adapter
, aligned_offset
, (u32
*)buf
);
1636 if (!err
&& aligned_len
> 4)
1637 err
= eeprom_rd_phys(adapter
,
1638 aligned_offset
+ aligned_len
- 4,
1639 (u32
*)&buf
[aligned_len
- 4]);
1642 memcpy(buf
+ (eeprom
->offset
& 3), data
, eeprom
->len
);
1646 err
= t4_seeprom_wp(adapter
, false);
1650 for (p
= (u32
*)buf
; !err
&& aligned_len
; aligned_len
-= 4, p
++) {
1651 err
= eeprom_wr_phys(adapter
, aligned_offset
, *p
);
1652 aligned_offset
+= 4;
1656 err
= t4_seeprom_wp(adapter
, true);
1663 static int set_flash(struct net_device
*netdev
, struct ethtool_flash
*ef
)
1666 const struct firmware
*fw
;
1667 struct adapter
*adap
= netdev2adap(netdev
);
1669 ef
->data
[sizeof(ef
->data
) - 1] = '\0';
1670 ret
= request_firmware(&fw
, ef
->data
, adap
->pdev_dev
);
1674 ret
= t4_load_fw(adap
, fw
->data
, fw
->size
);
1675 release_firmware(fw
);
1677 dev_info(adap
->pdev_dev
, "loaded firmware %s\n", ef
->data
);
1681 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1682 #define BCAST_CRC 0xa0ccc1a6
1684 static void get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1686 wol
->supported
= WAKE_BCAST
| WAKE_MAGIC
;
1687 wol
->wolopts
= netdev2adap(dev
)->wol
;
1688 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
1691 static int set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
1694 struct port_info
*pi
= netdev_priv(dev
);
1696 if (wol
->wolopts
& ~WOL_SUPPORTED
)
1698 t4_wol_magic_enable(pi
->adapter
, pi
->tx_chan
,
1699 (wol
->wolopts
& WAKE_MAGIC
) ? dev
->dev_addr
: NULL
);
1700 if (wol
->wolopts
& WAKE_BCAST
) {
1701 err
= t4_wol_pat_enable(pi
->adapter
, pi
->tx_chan
, 0xfe, ~0ULL,
1704 err
= t4_wol_pat_enable(pi
->adapter
, pi
->tx_chan
, 1,
1705 ~6ULL, ~0ULL, BCAST_CRC
, true);
1707 t4_wol_pat_enable(pi
->adapter
, pi
->tx_chan
, 0, 0, 0, 0, false);
1711 static int set_tso(struct net_device
*dev
, u32 value
)
1714 dev
->features
|= NETIF_F_TSO
| NETIF_F_TSO6
;
1716 dev
->features
&= ~(NETIF_F_TSO
| NETIF_F_TSO6
);
1720 static int set_flags(struct net_device
*dev
, u32 flags
)
1722 if (flags
& ~ETH_FLAG_RXHASH
)
1725 if (flags
& ETH_FLAG_RXHASH
)
1726 dev
->features
|= NETIF_F_RXHASH
;
1728 dev
->features
&= ~NETIF_F_RXHASH
;
1732 static struct ethtool_ops cxgb_ethtool_ops
= {
1733 .get_settings
= get_settings
,
1734 .set_settings
= set_settings
,
1735 .get_drvinfo
= get_drvinfo
,
1736 .get_msglevel
= get_msglevel
,
1737 .set_msglevel
= set_msglevel
,
1738 .get_ringparam
= get_sge_param
,
1739 .set_ringparam
= set_sge_param
,
1740 .get_coalesce
= get_coalesce
,
1741 .set_coalesce
= set_coalesce
,
1742 .get_eeprom_len
= get_eeprom_len
,
1743 .get_eeprom
= get_eeprom
,
1744 .set_eeprom
= set_eeprom
,
1745 .get_pauseparam
= get_pauseparam
,
1746 .set_pauseparam
= set_pauseparam
,
1747 .get_rx_csum
= get_rx_csum
,
1748 .set_rx_csum
= set_rx_csum
,
1749 .set_tx_csum
= ethtool_op_set_tx_ipv6_csum
,
1750 .set_sg
= ethtool_op_set_sg
,
1751 .get_link
= ethtool_op_get_link
,
1752 .get_strings
= get_strings
,
1753 .phys_id
= identify_port
,
1754 .nway_reset
= restart_autoneg
,
1755 .get_sset_count
= get_sset_count
,
1756 .get_ethtool_stats
= get_stats
,
1757 .get_regs_len
= get_regs_len
,
1758 .get_regs
= get_regs
,
1762 .set_flags
= set_flags
,
1763 .flash_device
= set_flash
,
1770 static int mem_open(struct inode
*inode
, struct file
*file
)
1772 file
->private_data
= inode
->i_private
;
1776 static ssize_t
mem_read(struct file
*file
, char __user
*buf
, size_t count
,
1780 loff_t avail
= file
->f_path
.dentry
->d_inode
->i_size
;
1781 unsigned int mem
= (uintptr_t)file
->private_data
& 3;
1782 struct adapter
*adap
= file
->private_data
- mem
;
1788 if (count
> avail
- pos
)
1789 count
= avail
- pos
;
1797 ret
= t4_mc_read(adap
, pos
, data
, NULL
);
1799 ret
= t4_edc_read(adap
, mem
, pos
, data
, NULL
);
1803 ofst
= pos
% sizeof(data
);
1804 len
= min(count
, sizeof(data
) - ofst
);
1805 if (copy_to_user(buf
, (u8
*)data
+ ofst
, len
))
1812 count
= pos
- *ppos
;
1817 static const struct file_operations mem_debugfs_fops
= {
1818 .owner
= THIS_MODULE
,
1823 static void __devinit
add_debugfs_mem(struct adapter
*adap
, const char *name
,
1824 unsigned int idx
, unsigned int size_mb
)
1828 de
= debugfs_create_file(name
, S_IRUSR
, adap
->debugfs_root
,
1829 (void *)adap
+ idx
, &mem_debugfs_fops
);
1830 if (de
&& de
->d_inode
)
1831 de
->d_inode
->i_size
= size_mb
<< 20;
1834 static int __devinit
setup_debugfs(struct adapter
*adap
)
1838 if (IS_ERR_OR_NULL(adap
->debugfs_root
))
1841 i
= t4_read_reg(adap
, MA_TARGET_MEM_ENABLE
);
1842 if (i
& EDRAM0_ENABLE
)
1843 add_debugfs_mem(adap
, "edc0", MEM_EDC0
, 5);
1844 if (i
& EDRAM1_ENABLE
)
1845 add_debugfs_mem(adap
, "edc1", MEM_EDC1
, 5);
1846 if (i
& EXT_MEM_ENABLE
)
1847 add_debugfs_mem(adap
, "mc", MEM_MC
,
1848 EXT_MEM_SIZE_GET(t4_read_reg(adap
, MA_EXT_MEMORY_BAR
)));
1850 debugfs_create_file("l2t", S_IRUSR
, adap
->debugfs_root
, adap
,
1856 * upper-layer driver support
1860 * Allocate an active-open TID and set it to the supplied value.
1862 int cxgb4_alloc_atid(struct tid_info
*t
, void *data
)
1866 spin_lock_bh(&t
->atid_lock
);
1868 union aopen_entry
*p
= t
->afree
;
1870 atid
= p
- t
->atid_tab
;
1875 spin_unlock_bh(&t
->atid_lock
);
1878 EXPORT_SYMBOL(cxgb4_alloc_atid
);
1881 * Release an active-open TID.
1883 void cxgb4_free_atid(struct tid_info
*t
, unsigned int atid
)
1885 union aopen_entry
*p
= &t
->atid_tab
[atid
];
1887 spin_lock_bh(&t
->atid_lock
);
1891 spin_unlock_bh(&t
->atid_lock
);
1893 EXPORT_SYMBOL(cxgb4_free_atid
);
1896 * Allocate a server TID and set it to the supplied value.
1898 int cxgb4_alloc_stid(struct tid_info
*t
, int family
, void *data
)
1902 spin_lock_bh(&t
->stid_lock
);
1903 if (family
== PF_INET
) {
1904 stid
= find_first_zero_bit(t
->stid_bmap
, t
->nstids
);
1905 if (stid
< t
->nstids
)
1906 __set_bit(stid
, t
->stid_bmap
);
1910 stid
= bitmap_find_free_region(t
->stid_bmap
, t
->nstids
, 2);
1915 t
->stid_tab
[stid
].data
= data
;
1916 stid
+= t
->stid_base
;
1919 spin_unlock_bh(&t
->stid_lock
);
1922 EXPORT_SYMBOL(cxgb4_alloc_stid
);
1925 * Release a server TID.
1927 void cxgb4_free_stid(struct tid_info
*t
, unsigned int stid
, int family
)
1929 stid
-= t
->stid_base
;
1930 spin_lock_bh(&t
->stid_lock
);
1931 if (family
== PF_INET
)
1932 __clear_bit(stid
, t
->stid_bmap
);
1934 bitmap_release_region(t
->stid_bmap
, stid
, 2);
1935 t
->stid_tab
[stid
].data
= NULL
;
1937 spin_unlock_bh(&t
->stid_lock
);
1939 EXPORT_SYMBOL(cxgb4_free_stid
);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
			     unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		schedule_work(&adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}
EXPORT_SYMBOL(cxgb4_queue_tid_release);
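/*
 * A minimal sketch of the pointer-tagging trick used above: because the
 * tid_tab entries are at least 4-byte aligned, the two low bits of their
 * addresses are free and can carry the Tx channel number.  The helpers
 * below are illustrative only, not driver API.
 */
static inline void *example_tag_ptr(void **p, unsigned int chan)
{
	return (void *)((uintptr_t)p | (chan & 3));	/* stash channel */
}

static inline unsigned int example_ptr_chan(void *tagged)
{
	return (uintptr_t)tagged & 3;			/* recover channel */
}

static inline void **example_ptr_untag(void *tagged)
{
	return (void **)((uintptr_t)tagged & ~(uintptr_t)3);
}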
1978 * Process the list of pending TID release requests.
1980 static void process_tid_release_list(struct work_struct
*work
)
1982 struct sk_buff
*skb
;
1983 struct adapter
*adap
;
1985 adap
= container_of(work
, struct adapter
, tid_release_task
);
1987 spin_lock_bh(&adap
->tid_release_lock
);
1988 while (adap
->tid_release_head
) {
1989 void **p
= adap
->tid_release_head
;
1990 unsigned int chan
= (uintptr_t)p
& 3;
1991 p
= (void *)p
- chan
;
1993 adap
->tid_release_head
= *p
;
1995 spin_unlock_bh(&adap
->tid_release_lock
);
1997 while (!(skb
= alloc_skb(sizeof(struct cpl_tid_release
),
1999 schedule_timeout_uninterruptible(1);
2001 mk_tid_release(skb
, chan
, p
- adap
->tids
.tid_tab
);
2002 t4_ofld_send(adap
, skb
);
2003 spin_lock_bh(&adap
->tid_release_lock
);
2005 adap
->tid_release_task_busy
= false;
2006 spin_unlock_bh(&adap
->tid_release_lock
);
2010 * Release a TID and inform HW. If we are unable to allocate the release
2011 * message we defer to a work queue.
2013 void cxgb4_remove_tid(struct tid_info
*t
, unsigned int chan
, unsigned int tid
)
2016 struct sk_buff
*skb
;
2017 struct adapter
*adap
= container_of(t
, struct adapter
, tids
);
2019 old
= t
->tid_tab
[tid
];
2020 skb
= alloc_skb(sizeof(struct cpl_tid_release
), GFP_ATOMIC
);
2022 t
->tid_tab
[tid
] = NULL
;
2023 mk_tid_release(skb
, chan
, tid
);
2024 t4_ofld_send(adap
, skb
);
2026 cxgb4_queue_tid_release(t
, chan
, tid
);
2028 atomic_dec(&t
->tids_in_use
);
2030 EXPORT_SYMBOL(cxgb4_remove_tid
);
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int natids = t->natids;

	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       BITS_TO_LONGS(t->nstids) * sizeof(long);
	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids);
	return 0;
}
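/*
 * A minimal sketch of the single-allocation carving pattern used by
 * tid_init() above: one block from t4_alloc_mem() is split into several
 * consecutive arrays so it can later be freed with one t4_free_mem().
 * The struct, names, and element types below are hypothetical.
 */
struct example_carved_tables {
	int *a;			/* na entries, at the start of the block */
	long *b;		/* nb entries, immediately after a[] */
	unsigned long *bitmap;	/* bitmap words, immediately after b[] */
};

static inline void example_carve(struct example_carved_tables *t, void *mem,
				 unsigned int na, unsigned int nb)
{
	t->a = mem;					/* start of block */
	t->b = (long *)&t->a[na];			/* right after a[] */
	t->bitmap = (unsigned long *)&t->b[nb];		/* right after b[] */
}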
/**
 *	cxgb4_create_server - create an IP server
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
2079 int cxgb4_create_server(const struct net_device
*dev
, unsigned int stid
,
2080 __be32 sip
, __be16 sport
, unsigned int queue
)
2083 struct sk_buff
*skb
;
2084 struct adapter
*adap
;
2085 struct cpl_pass_open_req
*req
;
2087 skb
= alloc_skb(sizeof(*req
), GFP_KERNEL
);
2091 adap
= netdev2adap(dev
);
2092 req
= (struct cpl_pass_open_req
*)__skb_put(skb
, sizeof(*req
));
2094 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ
, stid
));
2095 req
->local_port
= sport
;
2096 req
->peer_port
= htons(0);
2097 req
->local_ip
= sip
;
2098 req
->peer_ip
= htonl(0);
2099 chan
= netdev2pinfo(adap
->sge
.ingr_map
[queue
]->netdev
)->tx_chan
;
2100 req
->opt0
= cpu_to_be64(TX_CHAN(chan
));
2101 req
->opt1
= cpu_to_be64(CONN_POLICY_ASK
|
2102 SYN_RSS_ENABLE
| SYN_RSS_QUEUE(queue
));
2103 return t4_mgmt_tx(adap
, skb
);
2105 EXPORT_SYMBOL(cxgb4_create_server
);
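/*
 * A minimal usage sketch for the server-TID API above: a ULD would normally
 * allocate a server TID and then ask the hardware to listen on it.  The
 * variables (adap, dev, my_data, local_ip) and the port/queue values are
 * purely illustrative.
 *
 *	int stid = cxgb4_alloc_stid(&adap->tids, PF_INET, my_data);
 *	if (stid >= 0)
 *		ret = cxgb4_create_server(dev, stid, local_ip,
 *					  htons(80), 0);
 */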
2108 * cxgb4_create_server6 - create an IPv6 server
2110 * @stid: the server TID
2111 * @sip: local IPv6 address to bind server to
2112 * @sport: the server's TCP port
2113 * @queue: queue to direct messages from this server to
2115 * Create an IPv6 server for the given port and address.
2116 * Returns <0 on error and one of the %NET_XMIT_* values on success.
2118 int cxgb4_create_server6(const struct net_device
*dev
, unsigned int stid
,
2119 const struct in6_addr
*sip
, __be16 sport
,
2123 struct sk_buff
*skb
;
2124 struct adapter
*adap
;
2125 struct cpl_pass_open_req6
*req
;
2127 skb
= alloc_skb(sizeof(*req
), GFP_KERNEL
);
2131 adap
= netdev2adap(dev
);
2132 req
= (struct cpl_pass_open_req6
*)__skb_put(skb
, sizeof(*req
));
2134 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6
, stid
));
2135 req
->local_port
= sport
;
2136 req
->peer_port
= htons(0);
2137 req
->local_ip_hi
= *(__be64
*)(sip
->s6_addr
);
2138 req
->local_ip_lo
= *(__be64
*)(sip
->s6_addr
+ 8);
2139 req
->peer_ip_hi
= cpu_to_be64(0);
2140 req
->peer_ip_lo
= cpu_to_be64(0);
2141 chan
= netdev2pinfo(adap
->sge
.ingr_map
[queue
]->netdev
)->tx_chan
;
2142 req
->opt0
= cpu_to_be64(TX_CHAN(chan
));
2143 req
->opt1
= cpu_to_be64(CONN_POLICY_ASK
|
2144 SYN_RSS_ENABLE
| SYN_RSS_QUEUE(queue
));
2145 return t4_mgmt_tx(adap
, skb
);
2147 EXPORT_SYMBOL(cxgb4_create_server6
);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
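/*
 * A worked example of the selection above, using a hypothetical MTU table
 * { 1500, 4096, 9000 } and a target MTU of 8000: the loop stops at index 1,
 * so the function reports index 1 and returns 4096 -- the largest entry
 * that does not exceed the target.  A target smaller than 1500 would select
 * index 0 / 1500, the smallest available value.
 */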
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
2209 * cxgb4_netdev_by_hwid - return the net device of a HW port
2210 * @pdev: identifies the adapter
2211 * @id: the HW port id
2213 * Return the net device associated with the interface with the given HW
2216 struct net_device
*cxgb4_netdev_by_hwid(struct pci_dev
*pdev
, unsigned int id
)
2218 const struct adapter
*adap
= pci_get_drvdata(pdev
);
2220 if (!adap
|| id
>= NCHAN
)
2222 id
= adap
->chan_map
[id
];
2223 return id
< MAX_NPORTS
? adap
->port
[id
] : NULL
;
2225 EXPORT_SYMBOL(cxgb4_netdev_by_hwid
);
2227 void cxgb4_get_tcp_stats(struct pci_dev
*pdev
, struct tp_tcp_stats
*v4
,
2228 struct tp_tcp_stats
*v6
)
2230 struct adapter
*adap
= pci_get_drvdata(pdev
);
2232 spin_lock(&adap
->stats_lock
);
2233 t4_tp_get_tcp_stats(adap
, v4
, v6
);
2234 spin_unlock(&adap
->stats_lock
);
2236 EXPORT_SYMBOL(cxgb4_get_tcp_stats
);
2238 void cxgb4_iscsi_init(struct net_device
*dev
, unsigned int tag_mask
,
2239 const unsigned int *pgsz_order
)
2241 struct adapter
*adap
= netdev2adap(dev
);
2243 t4_write_reg(adap
, ULP_RX_ISCSI_TAGMASK
, tag_mask
);
2244 t4_write_reg(adap
, ULP_RX_ISCSI_PSZ
, HPZ0(pgsz_order
[0]) |
2245 HPZ1(pgsz_order
[1]) | HPZ2(pgsz_order
[2]) |
2246 HPZ3(pgsz_order
[3]));
2248 EXPORT_SYMBOL(cxgb4_iscsi_init
);
2250 static struct pci_driver cxgb4_driver
;
2252 static void check_neigh_update(struct neighbour
*neigh
)
2254 const struct device
*parent
;
2255 const struct net_device
*netdev
= neigh
->dev
;
2257 if (netdev
->priv_flags
& IFF_802_1Q_VLAN
)
2258 netdev
= vlan_dev_real_dev(netdev
);
2259 parent
= netdev
->dev
.parent
;
2260 if (parent
&& parent
->driver
== &cxgb4_driver
.driver
)
2261 t4_l2t_update(dev_get_drvdata(parent
), neigh
);
2264 static int netevent_cb(struct notifier_block
*nb
, unsigned long event
,
2268 case NETEVENT_NEIGH_UPDATE
:
2269 check_neigh_update(data
);
2271 case NETEVENT_PMTU_UPDATE
:
2272 case NETEVENT_REDIRECT
:
2279 static bool netevent_registered
;
2280 static struct notifier_block cxgb4_netevent_nb
= {
2281 .notifier_call
= netevent_cb
2284 static void uld_attach(struct adapter
*adap
, unsigned int uld
)
2287 struct cxgb4_lld_info lli
;
2289 lli
.pdev
= adap
->pdev
;
2290 lli
.l2t
= adap
->l2t
;
2291 lli
.tids
= &adap
->tids
;
2292 lli
.ports
= adap
->port
;
2293 lli
.vr
= &adap
->vres
;
2294 lli
.mtus
= adap
->params
.mtus
;
2295 if (uld
== CXGB4_ULD_RDMA
) {
2296 lli
.rxq_ids
= adap
->sge
.rdma_rxq
;
2297 lli
.nrxq
= adap
->sge
.rdmaqs
;
2298 } else if (uld
== CXGB4_ULD_ISCSI
) {
2299 lli
.rxq_ids
= adap
->sge
.ofld_rxq
;
2300 lli
.nrxq
= adap
->sge
.ofldqsets
;
2302 lli
.ntxq
= adap
->sge
.ofldqsets
;
2303 lli
.nchan
= adap
->params
.nports
;
2304 lli
.nports
= adap
->params
.nports
;
2305 lli
.wr_cred
= adap
->params
.ofldq_wr_cred
;
2306 lli
.adapter_type
= adap
->params
.rev
;
2307 lli
.iscsi_iolen
= MAXRXDATA_GET(t4_read_reg(adap
, TP_PARA_REG2
));
2308 lli
.udb_density
= 1 << QUEUESPERPAGEPF0_GET(
2309 t4_read_reg(adap
, SGE_EGRESS_QUEUES_PER_PAGE_PF
));
2310 lli
.ucq_density
= 1 << QUEUESPERPAGEPF0_GET(
2311 t4_read_reg(adap
, SGE_INGRESS_QUEUES_PER_PAGE_PF
));
2312 lli
.gts_reg
= adap
->regs
+ MYPF_REG(SGE_PF_GTS
);
2313 lli
.db_reg
= adap
->regs
+ MYPF_REG(SGE_PF_KDOORBELL
);
2314 lli
.fw_vers
= adap
->params
.fw_vers
;
2316 handle
= ulds
[uld
].add(&lli
);
2317 if (IS_ERR(handle
)) {
2318 dev_warn(adap
->pdev_dev
,
2319 "could not attach to the %s driver, error %ld\n",
2320 uld_str
[uld
], PTR_ERR(handle
));
2324 adap
->uld_handle
[uld
] = handle
;
2326 if (!netevent_registered
) {
2327 register_netevent_notifier(&cxgb4_netevent_nb
);
2328 netevent_registered
= true;
2331 if (adap
->flags
& FULL_INIT_DONE
)
2332 ulds
[uld
].state_change(handle
, CXGB4_STATE_UP
);
2335 static void attach_ulds(struct adapter
*adap
)
2339 mutex_lock(&uld_mutex
);
2340 list_add_tail(&adap
->list_node
, &adapter_list
);
2341 for (i
= 0; i
< CXGB4_ULD_MAX
; i
++)
2343 uld_attach(adap
, i
);
2344 mutex_unlock(&uld_mutex
);
2347 static void detach_ulds(struct adapter
*adap
)
2351 mutex_lock(&uld_mutex
);
2352 list_del(&adap
->list_node
);
2353 for (i
= 0; i
< CXGB4_ULD_MAX
; i
++)
2354 if (adap
->uld_handle
[i
]) {
2355 ulds
[i
].state_change(adap
->uld_handle
[i
],
2356 CXGB4_STATE_DETACH
);
2357 adap
->uld_handle
[i
] = NULL
;
2359 if (netevent_registered
&& list_empty(&adapter_list
)) {
2360 unregister_netevent_notifier(&cxgb4_netevent_nb
);
2361 netevent_registered
= false;
2363 mutex_unlock(&uld_mutex
);
2366 static void notify_ulds(struct adapter
*adap
, enum cxgb4_state new_state
)
2370 mutex_lock(&uld_mutex
);
2371 for (i
= 0; i
< CXGB4_ULD_MAX
; i
++)
2372 if (adap
->uld_handle
[i
])
2373 ulds
[i
].state_change(adap
->uld_handle
[i
], new_state
);
2374 mutex_unlock(&uld_mutex
);
/**
 *	cxgb4_register_uld - register an upper-layer driver
 *	@type: the ULD type
 *	@p: the ULD methods
 *
 *	Registers an upper-layer driver with this driver and notifies the ULD
 *	about any presently available devices that support its type.  Returns
 *	%-EBUSY if a ULD of the same type is already registered.
 */
2386 int cxgb4_register_uld(enum cxgb4_uld type
, const struct cxgb4_uld_info
*p
)
2389 struct adapter
*adap
;
2391 if (type
>= CXGB4_ULD_MAX
)
2393 mutex_lock(&uld_mutex
);
2394 if (ulds
[type
].add
) {
2399 list_for_each_entry(adap
, &adapter_list
, list_node
)
2400 uld_attach(adap
, type
);
2401 out
: mutex_unlock(&uld_mutex
);
2404 EXPORT_SYMBOL(cxgb4_register_uld
);
2407 * cxgb4_unregister_uld - unregister an upper-layer driver
2408 * @type: the ULD type
2410 * Unregisters an existing upper-layer driver.
2412 int cxgb4_unregister_uld(enum cxgb4_uld type
)
2414 struct adapter
*adap
;
2416 if (type
>= CXGB4_ULD_MAX
)
2418 mutex_lock(&uld_mutex
);
2419 list_for_each_entry(adap
, &adapter_list
, list_node
)
2420 adap
->uld_handle
[type
] = NULL
;
2421 ulds
[type
].add
= NULL
;
2422 mutex_unlock(&uld_mutex
);
2425 EXPORT_SYMBOL(cxgb4_unregister_uld
);
/*
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
2437 static int cxgb_up(struct adapter
*adap
)
2441 err
= setup_sge_queues(adap
);
2444 err
= setup_rss(adap
);
2448 if (adap
->flags
& USING_MSIX
) {
2449 name_msix_vecs(adap
);
2450 err
= request_irq(adap
->msix_info
[0].vec
, t4_nondata_intr
, 0,
2451 adap
->msix_info
[0].desc
, adap
);
2455 err
= request_msix_queue_irqs(adap
);
2457 free_irq(adap
->msix_info
[0].vec
, adap
);
2461 err
= request_irq(adap
->pdev
->irq
, t4_intr_handler(adap
),
2462 (adap
->flags
& USING_MSI
) ? 0 : IRQF_SHARED
,
2469 t4_intr_enable(adap
);
2470 adap
->flags
|= FULL_INIT_DONE
;
2471 notify_ulds(adap
, CXGB4_STATE_UP
);
2475 dev_err(adap
->pdev_dev
, "request_irq failed, err %d\n", err
);
2477 t4_free_sge_resources(adap
);
2481 static void cxgb_down(struct adapter
*adapter
)
2483 t4_intr_disable(adapter
);
2484 cancel_work_sync(&adapter
->tid_release_task
);
2485 adapter
->tid_release_task_busy
= false;
2487 if (adapter
->flags
& USING_MSIX
) {
2488 free_msix_queue_irqs(adapter
);
2489 free_irq(adapter
->msix_info
[0].vec
, adapter
);
2491 free_irq(adapter
->pdev
->irq
, adapter
);
2492 quiesce_rx(adapter
);
2493 t4_sge_stop(adapter
);
2494 t4_free_sge_resources(adapter
);
2495 adapter
->flags
&= ~FULL_INIT_DONE
;
2499 * net_device operations
2501 static int cxgb_open(struct net_device
*dev
)
2504 struct port_info
*pi
= netdev_priv(dev
);
2505 struct adapter
*adapter
= pi
->adapter
;
2507 if (!(adapter
->flags
& FULL_INIT_DONE
)) {
2508 err
= cxgb_up(adapter
);
2513 dev
->real_num_tx_queues
= pi
->nqsets
;
2515 netif_tx_start_all_queues(dev
);
2519 static int cxgb_close(struct net_device
*dev
)
2521 struct port_info
*pi
= netdev_priv(dev
);
2522 struct adapter
*adapter
= pi
->adapter
;
2524 netif_tx_stop_all_queues(dev
);
2525 netif_carrier_off(dev
);
2526 return t4_enable_vi(adapter
, 0, pi
->viid
, false, false);

static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;
	struct net_device_stats *ns = &dev->stats;

	spin_lock(&adapter->stats_lock);
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
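
/*
 * For reference: SIOCGMIIREG/SIOCSMIIREG callers encode the target device in
 * data->phy_id.  A clause-45 access packs the port and MMD addresses with the
 * helpers from <linux/mdio.h>, e.g. (hypothetical values, port_addr being the
 * caller's port address):
 *
 *	data->phy_id  = mdio_phy_id_c45(port_addr, MDIO_MMD_PMAPMD);
 *	data->reg_num = MDIO_CTRL1;
 *
 * while a clause-22 access simply uses a PHY address below 32 and a 5-bit
 * register number.
 */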

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)		/* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
			    true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
			    addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);

	pi->vlan_grp = grp;
	t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
		      true);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats        = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
	.ndo_vlan_rx_register = vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};

void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

static void setup_memwin(struct adapter *adap)
{
	u32 bar0;

	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     (bar0 + MEMWIN0_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     (bar0 + MEMWIN1_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     (bar0 + MEMWIN2_BASE) | BIR(0) |
		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
}
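
/*
 * Worked example (illustrative only): the WINDOW field encodes the aperture
 * size as log2(aperture) - 10, so a 64 KB window is programmed as
 * WINDOW(ilog2(65536) - 10) = WINDOW(6) and the smallest expressible aperture
 * is 1 KB (2^10 bytes).  The window base address is OR'ed in above it.
 */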

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd c;

	ret = t4_check_fw_version(adap);
	if (ret == -EINVAL || ret > 0) {
		if (upgrade_fw(adap) >= 0)		/* recache FW version */
			ret = t4_check_fw_version(adap);
	}
	if (ret < 0)
		return ret;

	/* contact FW, request master */
	ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}

	/* reset device */
	ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
	if (ret < 0)
		goto bye;

	/* get device capabilities */
	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_READ);
	c.retval_len16 = htonl(FW_LEN16(c));
	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
	if (ret < 0)
		goto bye;

	/* select capabilities we'll be using */
	if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		goto bye;
	}

	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
	if (ret < 0)
		goto bye;

	ret = t4_config_glbl_rss(adap, 0,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		goto bye;

	ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
			  FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		goto bye;

	for (v = 0; v < SGE_NTIMERS - 1; v++)
		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
	adap->sge.counter_val[0] = 1;
	for (v = 1; v < SGE_NCOUNTERS; v++)
		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
					       THRESHOLD_3_MASK);

	/* get basic stuff going */
	ret = t4_early_init(adap, 0);
	if (ret < 0)
		goto bye;

#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

	params[0] = FW_PARAM_DEV(PORTVEC);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
	if (ret < 0)
		goto bye;
	port_vec = val[0];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;

	if (c.toecaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];
		adap->params.offload = 1;
	}
	if (c.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;
	}
	if (c.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;
	adap->flags |= FW_OK;

	/* These are finalized by FW initialization, load their values now */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
	return 0;

	/*
	 * If a command timed out or failed with EIO FW does not operate within
	 * its spec or something catastrophic happened to HW/FW, stop issuing
	 * commands.
	 */
bye:	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, 0);
	return ret;
}
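
/*
 * Illustration (not additional driver code): a single firmware parameter is
 * read by composing the parameter mnemonic and index and handing the result
 * to t4_query_params(), e.g.
 *
 *	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	u32 value;
 *	ret = t4_query_params(adap, 0, 0, 0, 1, &param, &value);
 *
 * which is what the FW_PARAM_DEV(PORTVEC) lookup in adap_init0() expands to
 * when it fetches the port vector.
 */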

static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
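
/*
 * For example, cfg_queues() below calls
 *
 *	init_rspq(&r->rspq, 0, 0, 1024, 64);
 *
 * which selects SGE holdoff timer 0, enables the packet-count threshold with
 * counter index 0 (counter_val[0], set to 1 in adap_init0()), and asks for a
 * 1024-entry response queue with 64-byte entries.
 */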

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void __devinit cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > num_online_cpus())
		q10g = num_online_cpus();

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
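
/*
 * Worked example (assumed configuration, for illustration only): on a card
 * with two 10G ports and one 1G port, and assuming MAX_ETH_QSETS is 32 and
 * 8 CPUs are online, q10g = (32 - (3 - 2)) / 2 = 15, capped to 8.  Each 10G
 * port then gets 8 queue sets and the 1G port gets 1, so qidx (and therefore
 * max_ethqsets) ends up at 17 before MSI-X availability is considered.
 */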

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void __devinit reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int __devinit enable_msix(struct adapter *adap)
{
	int i, err, want, need;
	unsigned int ofld_need = 0;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
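
/*
 * Worked example (illustrative, under the same assumed two-10G-port setup as
 * above, with offload enabled and room for 8 offload queue sets): want =
 * 16 Ethernet queues + EXTRA_VECS (2) + 2 RDMA queues + 8 offload queues =
 * 28 vectors, while need = 2 ports + 2 + 2 * 2 channels = 8.  If fewer than
 * 28 vectors are available, pci_enable_msix() reports how many are, and the
 * request is retried with that smaller count as long as it still covers need.
 */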

static void __devinit print_port_info(struct adapter *adap)
{
	static const char *base[] = {
		"R", "KX4", "T", "KX", "T", "KR", "CX4"
	};

	int i;
	char buf[80];
	const char *spd = "";

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);
		char *bufp = buf;

		if (!test_bit(i, &adap->registered_device_map))
			continue;

		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
			bufp += sprintf(bufp, "100/");
		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
			bufp += sprintf(bufp, "1000/");
		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
			bufp += sprintf(bufp, "10G/");
		if (bufp != buf)
			--bufp;
		sprintf(bufp, "BASE-%s", base[pi->port_type]);

		netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
			    adap->params.vpd.id, adap->params.rev,
			    buf, is_offload(adap) ? "R" : "",
			    adap->params.pci.width, spd,
			    (adap->flags & USING_MSIX) ? " MSI-X" :
			    (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name)
			netdev_info(dev, "S/N: %s, E/C: %s\n",
				    adap->params.vpd.sn, adap->params.vpd.ec);
	}
}

#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	unsigned int highdma = 0;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through PF 0 */
	func = PCI_FUNC(pdev->devfn);
	if (func > 0)
		goto sriov;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = NETIF_F_HIGHDMA;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->rx_offload = RX_CSO;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;

		netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
		netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, 0, 0, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err) {
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
			continue;
		}
		/*
		 * Change the name we use for messages to the name of
		 * the first successfully registered interface.
		 */
		if (!adapter->registered_device_map)
			adapter->name = adapter->port[i]->name;

		__set_bit(i, &adapter->registered_device_map);
		adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
	}

	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	if (is_offload(adapter))
		attach_ulds(adapter);

	print_port_info(adapter);

 sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	t4_free_mem(adapter->tids.tid_tab);
	t4_free_mem(adapter->l2t);
	for_each_port(adapter, i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, 0);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);
		t4_free_mem(adapter->l2t);
		t4_free_mem(adapter->tids.tid_tab);
		disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, 0);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else if (PCI_FUNC(pdev->devfn) > 0)
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
};

static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);