use is_power_of_2() in cxgb3/cxgb3_main.c
[linux-2.6/verdex.git] drivers/net/cxgb3/cxgb3_main.c
1 /*
2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
32 #include <linux/module.h>
33 #include <linux/moduleparam.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/dma-mapping.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/if_vlan.h>
40 #include <linux/mii.h>
41 #include <linux/sockios.h>
42 #include <linux/workqueue.h>
43 #include <linux/proc_fs.h>
44 #include <linux/rtnetlink.h>
45 #include <linux/firmware.h>
46 #include <linux/log2.h>
47 #include <asm/uaccess.h>
49 #include "common.h"
50 #include "cxgb3_ioctl.h"
51 #include "regs.h"
52 #include "cxgb3_offload.h"
53 #include "version.h"
55 #include "cxgb3_ctl_defs.h"
56 #include "t3_cpl.h"
57 #include "firmware_exports.h"
59 enum {
60 MAX_TXQ_ENTRIES = 16384,
61 MAX_CTRL_TXQ_ENTRIES = 1024,
62 MAX_RSPQ_ENTRIES = 16384,
63 MAX_RX_BUFFERS = 16384,
64 MAX_RX_JUMBO_BUFFERS = 16384,
65 MIN_TXQ_ENTRIES = 4,
66 MIN_CTRL_TXQ_ENTRIES = 4,
67 MIN_RSPQ_ENTRIES = 32,
68 MIN_FL_ENTRIES = 32
71 #define PORT_MASK ((1 << MAX_NPORTS) - 1)
73 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
74 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
75 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
77 #define EEPROM_MAGIC 0x38E2F10C
79 #define CH_DEVICE(devid, ssid, idx) \
80 { PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
82 static const struct pci_device_id cxgb3_pci_tbl[] = {
83 CH_DEVICE(0x20, 1, 0), /* PE9000 */
84 CH_DEVICE(0x21, 1, 1), /* T302E */
85 CH_DEVICE(0x22, 1, 2), /* T310E */
86 CH_DEVICE(0x23, 1, 3), /* T320X */
87 CH_DEVICE(0x24, 1, 1), /* T302X */
88 CH_DEVICE(0x25, 1, 3), /* T320E */
89 CH_DEVICE(0x26, 1, 2), /* T310X */
90 CH_DEVICE(0x30, 1, 2), /* T3B10 */
91 CH_DEVICE(0x31, 1, 3), /* T3B20 */
92 CH_DEVICE(0x32, 1, 1), /* T3B02 */
93 {0,}
96 MODULE_DESCRIPTION(DRV_DESC);
97 MODULE_AUTHOR("Chelsio Communications");
98 MODULE_LICENSE("Dual BSD/GPL");
99 MODULE_VERSION(DRV_VERSION);
100 MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);
102 static int dflt_msg_enable = DFLT_MSG_ENABLE;
104 module_param(dflt_msg_enable, int, 0644);
105 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
108 * The driver uses the best interrupt scheme available on a platform in the
109 * order MSI-X, MSI, legacy pin interrupts. This parameter determines which
110 * of these schemes the driver may consider as follows:
112 * msi = 2: choose from among all three options
113 * msi = 1: only consider MSI and pin interrupts
114 * msi = 0: force pin interrupts
116 static int msi = 2;
118 module_param(msi, int, 0644);
119 MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
122 * The driver enables offload as a default.
123 * To disable it, use ofld_disable = 1.
126 static int ofld_disable = 0;
128 module_param(ofld_disable, int, 0644);
129 MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
132 * We have work elements that we need to cancel when an interface is taken
133 * down. Normally the work elements would be executed by keventd but that
134 * can deadlock because of linkwatch. If our close method takes the rtnl
135 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
136 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
137 * for our work to complete. Get our own work queue to solve this.
139 static struct workqueue_struct *cxgb3_wq;
142 * link_report - show link status and link speed/duplex
 143  *      @dev: the port whose settings are to be reported
145 * Shows the link status, speed, and duplex of a port.
147 static void link_report(struct net_device *dev)
149 if (!netif_carrier_ok(dev))
150 printk(KERN_INFO "%s: link down\n", dev->name);
151 else {
152 const char *s = "10Mbps";
153 const struct port_info *p = netdev_priv(dev);
155 switch (p->link_config.speed) {
156 case SPEED_10000:
157 s = "10Gbps";
158 break;
159 case SPEED_1000:
160 s = "1000Mbps";
161 break;
162 case SPEED_100:
163 s = "100Mbps";
164 break;
167 printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
168 p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
173 * t3_os_link_changed - handle link status changes
174 * @adapter: the adapter associated with the link change
 175  *      @port_id: the port index whose link status has changed
176 * @link_stat: the new status of the link
177 * @speed: the new speed setting
178 * @duplex: the new duplex setting
179 * @pause: the new flow-control setting
181 * This is the OS-dependent handler for link status changes. The OS
182 * neutral handler takes care of most of the processing for these events,
183 * then calls this handler for any OS-specific processing.
185 void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
186 int speed, int duplex, int pause)
188 struct net_device *dev = adapter->port[port_id];
189 struct port_info *pi = netdev_priv(dev);
190 struct cmac *mac = &pi->mac;
192 /* Skip changes from disabled ports. */
193 if (!netif_running(dev))
194 return;
196 if (link_stat != netif_carrier_ok(dev)) {
197 if (link_stat) {
198 t3_mac_enable(mac, MAC_DIRECTION_RX);
199 netif_carrier_on(dev);
200 } else {
201 netif_carrier_off(dev);
202 pi->phy.ops->power_down(&pi->phy, 1);
203 t3_mac_disable(mac, MAC_DIRECTION_RX);
204 t3_link_start(&pi->phy, mac, &pi->link_config);
207 link_report(dev);
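/*
 * Program a port's MAC Rx mode (promiscuity, all-multicast, multicast
 * filters) from the net_device's current address lists.
 */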
211 static void cxgb_set_rxmode(struct net_device *dev)
213 struct t3_rx_mode rm;
214 struct port_info *pi = netdev_priv(dev);
216 init_rx_mode(&rm, dev, dev->mc_list);
217 t3_mac_set_rx_mode(&pi->mac, &rm);
221 * link_start - enable a port
222 * @dev: the device to enable
224 * Performs the MAC and PHY actions needed to enable a port.
226 static void link_start(struct net_device *dev)
228 struct t3_rx_mode rm;
229 struct port_info *pi = netdev_priv(dev);
230 struct cmac *mac = &pi->mac;
232 init_rx_mode(&rm, dev, dev->mc_list);
233 t3_mac_reset(mac);
234 t3_mac_set_mtu(mac, dev->mtu);
235 t3_mac_set_address(mac, 0, dev->dev_addr);
236 t3_mac_set_rx_mode(mac, &rm);
237 t3_link_start(&pi->phy, mac, &pi->link_config);
238 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
241 static inline void cxgb_disable_msi(struct adapter *adapter)
243 if (adapter->flags & USING_MSIX) {
244 pci_disable_msix(adapter->pdev);
245 adapter->flags &= ~USING_MSIX;
246 } else if (adapter->flags & USING_MSI) {
247 pci_disable_msi(adapter->pdev);
248 adapter->flags &= ~USING_MSI;
253 * Interrupt handler for asynchronous events used with MSI-X.
255 static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
257 t3_slow_intr_handler(cookie);
258 return IRQ_HANDLED;
262 * Name the MSI-X interrupts.
264 static void name_msix_vecs(struct adapter *adap)
266 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;
268 snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
269 adap->msix_info[0].desc[n] = 0;
271 for_each_port(adap, j) {
272 struct net_device *d = adap->port[j];
273 const struct port_info *pi = netdev_priv(d);
275 for (i = 0; i < pi->nqsets; i++, msi_idx++) {
276 snprintf(adap->msix_info[msi_idx].desc, n,
277 "%s (queue %d)", d->name, i);
278 adap->msix_info[msi_idx].desc[n] = 0;
283 static int request_msix_data_irqs(struct adapter *adap)
285 int i, j, err, qidx = 0;
287 for_each_port(adap, i) {
288 int nqsets = adap2pinfo(adap, i)->nqsets;
290 for (j = 0; j < nqsets; ++j) {
291 err = request_irq(adap->msix_info[qidx + 1].vec,
292 t3_intr_handler(adap,
293 adap->sge.qs[qidx].
294 rspq.polling), 0,
295 adap->msix_info[qidx + 1].desc,
296 &adap->sge.qs[qidx]);
297 if (err) {
298 while (--qidx >= 0)
299 free_irq(adap->msix_info[qidx + 1].vec,
300 &adap->sge.qs[qidx]);
301 return err;
303 qidx++;
306 return 0;
310 * setup_rss - configure RSS
311 * @adap: the adapter
313 * Sets up RSS to distribute packets to multiple receive queues. We
314 * configure the RSS CPU lookup table to distribute to the number of HW
315 * receive queues, and the response queue lookup table to narrow that
316 * down to the response queues actually configured for each port.
317 * We always configure the RSS mapping for two ports since the mapping
318 * table has plenty of entries.
320 static void setup_rss(struct adapter *adap)
322 int i;
323 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
324 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
325 u8 cpus[SGE_QSETS + 1];
326 u16 rspq_map[RSS_TABLE_SIZE];
328 for (i = 0; i < SGE_QSETS; ++i)
329 cpus[i] = i;
330 cpus[SGE_QSETS] = 0xff; /* terminator */
332 for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
333 rspq_map[i] = i % nq0;
334 rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
337 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
338 F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
339 V_RRCPLCPUSIZE(6), cpus, rspq_map);
343 * If we have multiple receive queues per port serviced by NAPI we need one
344 * netdevice per queue as NAPI operates on netdevices. We already have one
345 * netdevice, namely the one associated with the interface, so we use dummy
346 * ones for any additional queues. Note that these netdevices exist purely
347 * so that NAPI has something to work with, they do not represent network
348 * ports and are not registered.
350 static int init_dummy_netdevs(struct adapter *adap)
352 int i, j, dummy_idx = 0;
353 struct net_device *nd;
355 for_each_port(adap, i) {
356 struct net_device *dev = adap->port[i];
357 const struct port_info *pi = netdev_priv(dev);
359 for (j = 0; j < pi->nqsets - 1; j++) {
360 if (!adap->dummy_netdev[dummy_idx]) {
361 nd = alloc_netdev(0, "", ether_setup);
362 if (!nd)
363 goto free_all;
365 nd->priv = adap;
366 nd->weight = 64;
367 set_bit(__LINK_STATE_START, &nd->state);
368 adap->dummy_netdev[dummy_idx] = nd;
370 strcpy(adap->dummy_netdev[dummy_idx]->name, dev->name);
371 dummy_idx++;
374 return 0;
376 free_all:
377 while (--dummy_idx >= 0) {
378 free_netdev(adap->dummy_netdev[dummy_idx]);
379 adap->dummy_netdev[dummy_idx] = NULL;
381 return -ENOMEM;
385 * Wait until all NAPI handlers are descheduled. This includes the handlers of
386 * both netdevices representing interfaces and the dummy ones for the extra
387 * queues.
389 static void quiesce_rx(struct adapter *adap)
391 int i;
392 struct net_device *dev;
394 for_each_port(adap, i) {
395 dev = adap->port[i];
396 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
397 msleep(1);
400 for (i = 0; i < ARRAY_SIZE(adap->dummy_netdev); i++) {
401 dev = adap->dummy_netdev[i];
402 if (dev)
403 while (test_bit(__LINK_STATE_RX_SCHED, &dev->state))
404 msleep(1);
409 * setup_sge_qsets - configure SGE Tx/Rx/response queues
410 * @adap: the adapter
412 * Determines how many sets of SGE queues to use and initializes them.
413 * We support multiple queue sets per port if we have MSI-X, otherwise
414 * just one queue set per port.
416 static int setup_sge_qsets(struct adapter *adap)
418 int i, j, err, irq_idx = 0, qset_idx = 0, dummy_dev_idx = 0;
419 unsigned int ntxq = SGE_TXQ_PER_SET;
421 if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
422 irq_idx = -1;
424 for_each_port(adap, i) {
425 struct net_device *dev = adap->port[i];
426 const struct port_info *pi = netdev_priv(dev);
428 for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
429 err = t3_sge_alloc_qset(adap, qset_idx, 1,
430 (adap->flags & USING_MSIX) ? qset_idx + 1 :
431 irq_idx,
432 &adap->params.sge.qset[qset_idx], ntxq,
433 j == 0 ? dev :
 434                                 adap->dummy_netdev[dummy_dev_idx++]);
435 if (err) {
436 t3_free_sge_resources(adap);
437 return err;
442 return 0;
445 static ssize_t attr_show(struct device *d, struct device_attribute *attr,
446 char *buf,
447 ssize_t(*format) (struct net_device *, char *))
449 ssize_t len;
451 /* Synchronize with ioctls that may shut down the device */
452 rtnl_lock();
453 len = (*format) (to_net_dev(d), buf);
454 rtnl_unlock();
455 return len;
458 static ssize_t attr_store(struct device *d, struct device_attribute *attr,
459 const char *buf, size_t len,
460 ssize_t(*set) (struct net_device *, unsigned int),
461 unsigned int min_val, unsigned int max_val)
463 char *endp;
464 ssize_t ret;
465 unsigned int val;
467 if (!capable(CAP_NET_ADMIN))
468 return -EPERM;
470 val = simple_strtoul(buf, &endp, 0);
471 if (endp == buf || val < min_val || val > max_val)
472 return -EINVAL;
474 rtnl_lock();
475 ret = (*set) (to_net_dev(d), val);
476 if (!ret)
477 ret = len;
478 rtnl_unlock();
479 return ret;
482 #define CXGB3_SHOW(name, val_expr) \
483 static ssize_t format_##name(struct net_device *dev, char *buf) \
485 struct adapter *adap = dev->priv; \
486 return sprintf(buf, "%u\n", val_expr); \
488 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
489 char *buf) \
491 return attr_show(d, attr, buf, format_##name); \
494 static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
496 struct adapter *adap = dev->priv;
497 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;
499 if (adap->flags & FULL_INIT_DONE)
500 return -EBUSY;
501 if (val && adap->params.rev == 0)
502 return -EINVAL;
503 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
504 min_tids)
505 return -EINVAL;
506 adap->params.mc5.nfilters = val;
507 return 0;
510 static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
511 const char *buf, size_t len)
513 return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
516 static ssize_t set_nservers(struct net_device *dev, unsigned int val)
518 struct adapter *adap = dev->priv;
520 if (adap->flags & FULL_INIT_DONE)
521 return -EBUSY;
522 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
523 MC5_MIN_TIDS)
524 return -EINVAL;
525 adap->params.mc5.nservers = val;
526 return 0;
529 static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
530 const char *buf, size_t len)
532 return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
535 #define CXGB3_ATTR_R(name, val_expr) \
536 CXGB3_SHOW(name, val_expr) \
537 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
539 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
540 CXGB3_SHOW(name, val_expr) \
541 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
543 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
544 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
545 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
547 static struct attribute *cxgb3_attrs[] = {
548 &dev_attr_cam_size.attr,
549 &dev_attr_nfilters.attr,
550 &dev_attr_nservers.attr,
551 NULL
554 static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
556 static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
557 char *buf, int sched)
559 ssize_t len;
560 unsigned int v, addr, bpt, cpt;
561 struct adapter *adap = to_net_dev(d)->priv;
563 addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
564 rtnl_lock();
565 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
566 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
567 if (sched & 1)
568 v >>= 16;
569 bpt = (v >> 8) & 0xff;
570 cpt = v & 0xff;
571 if (!cpt)
572 len = sprintf(buf, "disabled\n");
573 else {
574 v = (adap->params.vpd.cclk * 1000) / cpt;
575 len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
577 rtnl_unlock();
578 return len;
581 static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
582 const char *buf, size_t len, int sched)
584 char *endp;
585 ssize_t ret;
586 unsigned int val;
587 struct adapter *adap = to_net_dev(d)->priv;
589 if (!capable(CAP_NET_ADMIN))
590 return -EPERM;
592 val = simple_strtoul(buf, &endp, 0);
593 if (endp == buf || val > 10000000)
594 return -EINVAL;
596 rtnl_lock();
597 ret = t3_config_sched(adap, val, sched);
598 if (!ret)
599 ret = len;
600 rtnl_unlock();
601 return ret;
604 #define TM_ATTR(name, sched) \
605 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
606 char *buf) \
608 return tm_attr_show(d, attr, buf, sched); \
610 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
611 const char *buf, size_t len) \
613 return tm_attr_store(d, attr, buf, len, sched); \
615 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
617 TM_ATTR(sched0, 0);
618 TM_ATTR(sched1, 1);
619 TM_ATTR(sched2, 2);
620 TM_ATTR(sched3, 3);
621 TM_ATTR(sched4, 4);
622 TM_ATTR(sched5, 5);
623 TM_ATTR(sched6, 6);
624 TM_ATTR(sched7, 7);
626 static struct attribute *offload_attrs[] = {
627 &dev_attr_sched0.attr,
628 &dev_attr_sched1.attr,
629 &dev_attr_sched2.attr,
630 &dev_attr_sched3.attr,
631 &dev_attr_sched4.attr,
632 &dev_attr_sched5.attr,
633 &dev_attr_sched6.attr,
634 &dev_attr_sched7.attr,
635 NULL
638 static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
641 * Sends an sk_buff to an offload queue driver
642 * after dealing with any active network taps.
644 static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
646 int ret;
648 local_bh_disable();
649 ret = t3_offload_tx(tdev, skb);
650 local_bh_enable();
651 return ret;
654 static int write_smt_entry(struct adapter *adapter, int idx)
656 struct cpl_smt_write_req *req;
657 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
659 if (!skb)
660 return -ENOMEM;
662 req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
663 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
664 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
665 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
666 req->iff = idx;
667 memset(req->src_mac1, 0, sizeof(req->src_mac1));
668 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
669 skb->priority = 1;
670 offload_tx(&adapter->tdev, skb);
671 return 0;
674 static int init_smt(struct adapter *adapter)
676 int i;
678 for_each_port(adapter, i)
679 write_smt_entry(adapter, i);
680 return 0;
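/*
 * Write the ports' MTUs into the TP per-port MTU table register, with
 * port 1's MTU in the upper 16 bits.
 */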
683 static void init_port_mtus(struct adapter *adapter)
685 unsigned int mtus = adapter->port[0]->mtu;
687 if (adapter->port[1])
688 mtus |= adapter->port[1]->mtu << 16;
689 t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
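/*
 * Send a firmware management work request that binds a Tx queue to packet
 * scheduler 'sched' with the given min/max parameters on behalf of a port.
 */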
692 static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
693 int hi, int port)
695 struct sk_buff *skb;
696 struct mngt_pktsched_wr *req;
698 skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
699 req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
700 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
701 req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
702 req->sched = sched;
703 req->idx = qidx;
704 req->min = lo;
705 req->max = hi;
706 req->binding = port;
707 t3_mgmt_tx(adap, skb);
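/* Bind each port's queue sets to Tx scheduler 1 with default (-1) parameters. */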
710 static void bind_qsets(struct adapter *adap)
712 int i, j;
714 for_each_port(adap, i) {
715 const struct port_info *pi = adap2pinfo(adap, i);
717 for (j = 0; j < pi->nqsets; ++j)
718 send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
719 -1, i);
723 #define FW_FNAME "t3fw-%d.%d.%d.bin"
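/*
 * Load the firmware image matching the driver's expected version via
 * request_firmware() and program it into the adapter with t3_load_fw().
 */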
725 static int upgrade_fw(struct adapter *adap)
727 int ret;
728 char buf[64];
729 const struct firmware *fw;
730 struct device *dev = &adap->pdev->dev;
732 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
733 FW_VERSION_MINOR, FW_VERSION_MICRO);
734 ret = request_firmware(&fw, buf, dev);
735 if (ret < 0) {
736 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
737 buf);
738 return ret;
740 ret = t3_load_fw(adap, fw->data, fw->size);
741 release_firmware(fw);
742 return ret;
746 * cxgb_up - enable the adapter
747 * @adapter: adapter being enabled
749 * Called when the first port is enabled, this function performs the
750 * actions necessary to make an adapter operational, such as completing
751 * the initialization of HW modules, and enabling interrupts.
753 * Must be called with the rtnl lock held.
755 static int cxgb_up(struct adapter *adap)
757 int err = 0;
759 if (!(adap->flags & FULL_INIT_DONE)) {
760 err = t3_check_fw_version(adap);
761 if (err == -EINVAL)
762 err = upgrade_fw(adap);
763 if (err)
764 goto out;
766 err = init_dummy_netdevs(adap);
767 if (err)
768 goto out;
770 err = t3_init_hw(adap, 0);
771 if (err)
772 goto out;
774 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
776 err = setup_sge_qsets(adap);
777 if (err)
778 goto out;
780 setup_rss(adap);
781 adap->flags |= FULL_INIT_DONE;
784 t3_intr_clear(adap);
786 if (adap->flags & USING_MSIX) {
787 name_msix_vecs(adap);
788 err = request_irq(adap->msix_info[0].vec,
789 t3_async_intr_handler, 0,
790 adap->msix_info[0].desc, adap);
791 if (err)
792 goto irq_err;
794 if (request_msix_data_irqs(adap)) {
795 free_irq(adap->msix_info[0].vec, adap);
796 goto irq_err;
798 } else if ((err = request_irq(adap->pdev->irq,
799 t3_intr_handler(adap,
800 adap->sge.qs[0].rspq.
801 polling),
802 (adap->flags & USING_MSI) ?
803 0 : IRQF_SHARED,
804 adap->name, adap)))
805 goto irq_err;
807 t3_sge_start(adap);
808 t3_intr_enable(adap);
810 if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
811 bind_qsets(adap);
812 adap->flags |= QUEUES_BOUND;
814 out:
815 return err;
816 irq_err:
817 CH_ERR(adap, "request_irq failed, err %d\n", err);
818 goto out;
822 * Release resources when all the ports and offloading have been stopped.
824 static void cxgb_down(struct adapter *adapter)
826 t3_sge_stop(adapter);
827 spin_lock_irq(&adapter->work_lock); /* sync with PHY intr task */
828 t3_intr_disable(adapter);
829 spin_unlock_irq(&adapter->work_lock);
831 if (adapter->flags & USING_MSIX) {
832 int i, n = 0;
834 free_irq(adapter->msix_info[0].vec, adapter);
835 for_each_port(adapter, i)
836 n += adap2pinfo(adapter, i)->nqsets;
838 for (i = 0; i < n; ++i)
839 free_irq(adapter->msix_info[i + 1].vec,
840 &adapter->sge.qs[i]);
841 } else
842 free_irq(adapter->pdev->irq, adapter);
844 flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
845 quiesce_rx(adapter);
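/*
 * Schedule the next run of the periodic adapter check task.  The interval is
 * the link polling period if one is configured, otherwise the statistics
 * update period.
 */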
848 static void schedule_chk_task(struct adapter *adap)
850 unsigned int timeo;
852 timeo = adap->params.linkpoll_period ?
853 (HZ * adap->params.linkpoll_period) / 10 :
854 adap->params.stats_update_period * HZ;
855 if (timeo)
856 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
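/*
 * Bring up offload support: put the adapter in offload mode, activate the
 * offload module, program the MTU and SMT tables, and notify registered
 * upper-layer clients.
 */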
859 static int offload_open(struct net_device *dev)
861 struct adapter *adapter = dev->priv;
862 struct t3cdev *tdev = T3CDEV(dev);
863 int adap_up = adapter->open_device_map & PORT_MASK;
864 int err = 0;
866 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
867 return 0;
869 if (!adap_up && (err = cxgb_up(adapter)) < 0)
870 return err;
872 t3_tp_set_offload_mode(adapter, 1);
873 tdev->lldev = adapter->port[0];
874 err = cxgb3_offload_activate(adapter);
875 if (err)
876 goto out;
878 init_port_mtus(adapter);
879 t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
880 adapter->params.b_wnd,
881 adapter->params.rev == 0 ?
882 adapter->port[0]->mtu : 0xffff);
883 init_smt(adapter);
885 /* Never mind if the next step fails */
886 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
888 /* Call back all registered clients */
889 cxgb3_add_clients(tdev);
891 out:
892 /* restore them in case the offload module has changed them */
893 if (err) {
894 t3_tp_set_offload_mode(adapter, 0);
895 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
896 cxgb3_set_dummy_ops(tdev);
898 return err;
901 static int offload_close(struct t3cdev *tdev)
903 struct adapter *adapter = tdev2adap(tdev);
905 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
906 return 0;
908 /* Call back all registered clients */
909 cxgb3_remove_clients(tdev);
911 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
913 tdev->lldev = NULL;
914 cxgb3_set_dummy_ops(tdev);
915 t3_tp_set_offload_mode(adapter, 0);
916 clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
918 if (!adapter->open_device_map)
919 cxgb_down(adapter);
921 cxgb3_offload_deactivate(adapter);
922 return 0;
925 static int cxgb_open(struct net_device *dev)
927 int err;
928 struct adapter *adapter = dev->priv;
929 struct port_info *pi = netdev_priv(dev);
930 int other_ports = adapter->open_device_map & PORT_MASK;
932 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
933 return err;
935 set_bit(pi->port_id, &adapter->open_device_map);
936 if (is_offload(adapter) && !ofld_disable) {
937 err = offload_open(dev);
938 if (err)
939 printk(KERN_WARNING
940 "Could not initialize offload capabilities\n");
943 link_start(dev);
944 t3_port_intr_enable(adapter, pi->port_id);
945 netif_start_queue(dev);
946 if (!other_ports)
947 schedule_chk_task(adapter);
949 return 0;
952 static int cxgb_close(struct net_device *dev)
954 struct adapter *adapter = dev->priv;
955 struct port_info *p = netdev_priv(dev);
957 t3_port_intr_disable(adapter, p->port_id);
958 netif_stop_queue(dev);
959 p->phy.ops->power_down(&p->phy, 1);
960 netif_carrier_off(dev);
961 t3_mac_disable(&p->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
963 spin_lock(&adapter->work_lock); /* sync with update task */
964 clear_bit(p->port_id, &adapter->open_device_map);
965 spin_unlock(&adapter->work_lock);
967 if (!(adapter->open_device_map & PORT_MASK))
968 cancel_rearming_delayed_workqueue(cxgb3_wq,
969 &adapter->adap_check_task);
971 if (!adapter->open_device_map)
972 cxgb_down(adapter);
974 return 0;
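/* Fill in net_device_stats from the port's accumulated MAC statistics. */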
977 static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
979 struct adapter *adapter = dev->priv;
980 struct port_info *p = netdev_priv(dev);
981 struct net_device_stats *ns = &p->netstats;
982 const struct mac_stats *pstats;
984 spin_lock(&adapter->stats_lock);
985 pstats = t3_mac_update_stats(&p->mac);
986 spin_unlock(&adapter->stats_lock);
988 ns->tx_bytes = pstats->tx_octets;
989 ns->tx_packets = pstats->tx_frames;
990 ns->rx_bytes = pstats->rx_octets;
991 ns->rx_packets = pstats->rx_frames;
992 ns->multicast = pstats->rx_mcast_frames;
994 ns->tx_errors = pstats->tx_underrun;
995 ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
996 pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
997 pstats->rx_fifo_ovfl;
999 /* detailed rx_errors */
1000 ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
1001 ns->rx_over_errors = 0;
1002 ns->rx_crc_errors = pstats->rx_fcs_errs;
1003 ns->rx_frame_errors = pstats->rx_symbol_errs;
1004 ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
1005 ns->rx_missed_errors = pstats->rx_cong_drops;
1007 /* detailed tx_errors */
1008 ns->tx_aborted_errors = 0;
1009 ns->tx_carrier_errors = 0;
1010 ns->tx_fifo_errors = pstats->tx_underrun;
1011 ns->tx_heartbeat_errors = 0;
1012 ns->tx_window_errors = 0;
1013 return ns;
1016 static u32 get_msglevel(struct net_device *dev)
1018 struct adapter *adapter = dev->priv;
1020 return adapter->msg_enable;
1023 static void set_msglevel(struct net_device *dev, u32 val)
1025 struct adapter *adapter = dev->priv;
1027 adapter->msg_enable = val;
1030 static char stats_strings[][ETH_GSTRING_LEN] = {
1031 "TxOctetsOK ",
1032 "TxFramesOK ",
1033 "TxMulticastFramesOK",
1034 "TxBroadcastFramesOK",
1035 "TxPauseFrames ",
1036 "TxUnderrun ",
1037 "TxExtUnderrun ",
1039 "TxFrames64 ",
1040 "TxFrames65To127 ",
1041 "TxFrames128To255 ",
1042 "TxFrames256To511 ",
1043 "TxFrames512To1023 ",
1044 "TxFrames1024To1518 ",
1045 "TxFrames1519ToMax ",
1047 "RxOctetsOK ",
1048 "RxFramesOK ",
1049 "RxMulticastFramesOK",
1050 "RxBroadcastFramesOK",
1051 "RxPauseFrames ",
1052 "RxFCSErrors ",
1053 "RxSymbolErrors ",
1054 "RxShortErrors ",
1055 "RxJabberErrors ",
1056 "RxLengthErrors ",
1057 "RxFIFOoverflow ",
1059 "RxFrames64 ",
1060 "RxFrames65To127 ",
1061 "RxFrames128To255 ",
1062 "RxFrames256To511 ",
1063 "RxFrames512To1023 ",
1064 "RxFrames1024To1518 ",
1065 "RxFrames1519ToMax ",
1067 "PhyFIFOErrors ",
1068 "TSO ",
1069 "VLANextractions ",
1070 "VLANinsertions ",
1071 "TxCsumOffload ",
1072 "RxCsumGood ",
1073 "RxDrops ",
1075 "CheckTXEnToggled ",
1076 "CheckResets ",
1080 static int get_stats_count(struct net_device *dev)
1082 return ARRAY_SIZE(stats_strings);
1085 #define T3_REGMAP_SIZE (3 * 1024)
1087 static int get_regs_len(struct net_device *dev)
1089 return T3_REGMAP_SIZE;
1092 static int get_eeprom_len(struct net_device *dev)
1094 return EEPROMSIZE;
1097 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1099 u32 fw_vers = 0;
1100 struct adapter *adapter = dev->priv;
1102 t3_get_fw_version(adapter, &fw_vers);
1104 strcpy(info->driver, DRV_NAME);
1105 strcpy(info->version, DRV_VERSION);
1106 strcpy(info->bus_info, pci_name(adapter->pdev));
1107 if (!fw_vers)
1108 strcpy(info->fw_version, "N/A");
1109 else {
1110 snprintf(info->fw_version, sizeof(info->fw_version),
1111 "%s %u.%u.%u",
1112 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
1113 G_FW_VERSION_MAJOR(fw_vers),
1114 G_FW_VERSION_MINOR(fw_vers),
1115 G_FW_VERSION_MICRO(fw_vers));
1119 static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
1121 if (stringset == ETH_SS_STATS)
1122 memcpy(data, stats_strings, sizeof(stats_strings));
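/* Sum a per-queue SGE statistic over all queue sets owned by a port. */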
1125 static unsigned long collect_sge_port_stats(struct adapter *adapter,
1126 struct port_info *p, int idx)
1128 int i;
1129 unsigned long tot = 0;
1131 for (i = 0; i < p->nqsets; ++i)
1132 tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
1133 return tot;
1136 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1137 u64 *data)
1139 struct adapter *adapter = dev->priv;
1140 struct port_info *pi = netdev_priv(dev);
1141 const struct mac_stats *s;
1143 spin_lock(&adapter->stats_lock);
1144 s = t3_mac_update_stats(&pi->mac);
1145 spin_unlock(&adapter->stats_lock);
1147 *data++ = s->tx_octets;
1148 *data++ = s->tx_frames;
1149 *data++ = s->tx_mcast_frames;
1150 *data++ = s->tx_bcast_frames;
1151 *data++ = s->tx_pause;
1152 *data++ = s->tx_underrun;
1153 *data++ = s->tx_fifo_urun;
1155 *data++ = s->tx_frames_64;
1156 *data++ = s->tx_frames_65_127;
1157 *data++ = s->tx_frames_128_255;
1158 *data++ = s->tx_frames_256_511;
1159 *data++ = s->tx_frames_512_1023;
1160 *data++ = s->tx_frames_1024_1518;
1161 *data++ = s->tx_frames_1519_max;
1163 *data++ = s->rx_octets;
1164 *data++ = s->rx_frames;
1165 *data++ = s->rx_mcast_frames;
1166 *data++ = s->rx_bcast_frames;
1167 *data++ = s->rx_pause;
1168 *data++ = s->rx_fcs_errs;
1169 *data++ = s->rx_symbol_errs;
1170 *data++ = s->rx_short;
1171 *data++ = s->rx_jabber;
1172 *data++ = s->rx_too_long;
1173 *data++ = s->rx_fifo_ovfl;
1175 *data++ = s->rx_frames_64;
1176 *data++ = s->rx_frames_65_127;
1177 *data++ = s->rx_frames_128_255;
1178 *data++ = s->rx_frames_256_511;
1179 *data++ = s->rx_frames_512_1023;
1180 *data++ = s->rx_frames_1024_1518;
1181 *data++ = s->rx_frames_1519_max;
1183 *data++ = pi->phy.fifo_errors;
1185 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
1186 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
1187 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
1188 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
1189 *data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
1190 *data++ = s->rx_cong_drops;
1192 *data++ = s->num_toggled;
1193 *data++ = s->num_resets;
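/*
 * Copy the adapter registers in the range [start, end] into the register
 * snapshot buffer at the matching offset.
 */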
1196 static inline void reg_block_dump(struct adapter *ap, void *buf,
1197 unsigned int start, unsigned int end)
1199 u32 *p = buf + start;
1201 for (; start <= end; start += sizeof(u32))
1202 *p++ = t3_read_reg(ap, start);
1205 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1206 void *buf)
1208 struct adapter *ap = dev->priv;
1211 * Version scheme:
1212 * bits 0..9: chip version
1213 * bits 10..15: chip revision
1214 * bit 31: set for PCIe cards
1216 regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);
1219 * We skip the MAC statistics registers because they are clear-on-read.
1220 * Also reading multi-register stats would need to synchronize with the
1221 * periodic mac stats accumulation. Hard to justify the complexity.
1223 memset(buf, 0, T3_REGMAP_SIZE);
1224 reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
1225 reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
1226 reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
1227 reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
1228 reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
1229 reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
1230 XGM_REG(A_XGM_SERDES_STAT3, 1));
1231 reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
1232 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
1235 static int restart_autoneg(struct net_device *dev)
1237 struct port_info *p = netdev_priv(dev);
1239 if (!netif_running(dev))
1240 return -EAGAIN;
1241 if (p->link_config.autoneg != AUTONEG_ENABLE)
1242 return -EINVAL;
1243 p->phy.ops->autoneg_restart(&p->phy);
1244 return 0;
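/*
 * ethtool LED identification: toggle GPIO0 every 500 ms for roughly
 * 'data' seconds (default 2).
 */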
1247 static int cxgb3_phys_id(struct net_device *dev, u32 data)
1249 int i;
1250 struct adapter *adapter = dev->priv;
1252 if (data == 0)
1253 data = 2;
1255 for (i = 0; i < data * 2; i++) {
1256 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1257 (i & 1) ? F_GPIO0_OUT_VAL : 0);
1258 if (msleep_interruptible(500))
1259 break;
1261 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
1262 F_GPIO0_OUT_VAL);
1263 return 0;
1266 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1268 struct port_info *p = netdev_priv(dev);
1270 cmd->supported = p->link_config.supported;
1271 cmd->advertising = p->link_config.advertising;
1273 if (netif_carrier_ok(dev)) {
1274 cmd->speed = p->link_config.speed;
1275 cmd->duplex = p->link_config.duplex;
1276 } else {
1277 cmd->speed = -1;
1278 cmd->duplex = -1;
1281 cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
1282 cmd->phy_address = p->phy.addr;
1283 cmd->transceiver = XCVR_EXTERNAL;
1284 cmd->autoneg = p->link_config.autoneg;
1285 cmd->maxtxpkt = 0;
1286 cmd->maxrxpkt = 0;
1287 return 0;
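/* Map a (speed, duplex) pair to the corresponding ethtool SUPPORTED_* bit. */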
1290 static int speed_duplex_to_caps(int speed, int duplex)
1292 int cap = 0;
1294 switch (speed) {
1295 case SPEED_10:
1296 if (duplex == DUPLEX_FULL)
1297 cap = SUPPORTED_10baseT_Full;
1298 else
1299 cap = SUPPORTED_10baseT_Half;
1300 break;
1301 case SPEED_100:
1302 if (duplex == DUPLEX_FULL)
1303 cap = SUPPORTED_100baseT_Full;
1304 else
1305 cap = SUPPORTED_100baseT_Half;
1306 break;
1307 case SPEED_1000:
1308 if (duplex == DUPLEX_FULL)
1309 cap = SUPPORTED_1000baseT_Full;
1310 else
1311 cap = SUPPORTED_1000baseT_Half;
1312 break;
1313 case SPEED_10000:
1314 if (duplex == DUPLEX_FULL)
1315 cap = SUPPORTED_10000baseT_Full;
1317 return cap;
1320 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1321 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1322 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1323 ADVERTISED_10000baseT_Full)
1325 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1327 struct port_info *p = netdev_priv(dev);
1328 struct link_config *lc = &p->link_config;
1330 if (!(lc->supported & SUPPORTED_Autoneg))
1331 return -EOPNOTSUPP; /* can't change speed/duplex */
1333 if (cmd->autoneg == AUTONEG_DISABLE) {
1334 int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
1336 if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
1337 return -EINVAL;
1338 lc->requested_speed = cmd->speed;
1339 lc->requested_duplex = cmd->duplex;
1340 lc->advertising = 0;
1341 } else {
1342 cmd->advertising &= ADVERTISED_MASK;
1343 cmd->advertising &= lc->supported;
1344 if (!cmd->advertising)
1345 return -EINVAL;
1346 lc->requested_speed = SPEED_INVALID;
1347 lc->requested_duplex = DUPLEX_INVALID;
1348 lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
1350 lc->autoneg = cmd->autoneg;
1351 if (netif_running(dev))
1352 t3_link_start(&p->phy, &p->mac, lc);
1353 return 0;
1356 static void get_pauseparam(struct net_device *dev,
1357 struct ethtool_pauseparam *epause)
1359 struct port_info *p = netdev_priv(dev);
1361 epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
1362 epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
1363 epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
1366 static int set_pauseparam(struct net_device *dev,
1367 struct ethtool_pauseparam *epause)
1369 struct port_info *p = netdev_priv(dev);
1370 struct link_config *lc = &p->link_config;
1372 if (epause->autoneg == AUTONEG_DISABLE)
1373 lc->requested_fc = 0;
1374 else if (lc->supported & SUPPORTED_Autoneg)
1375 lc->requested_fc = PAUSE_AUTONEG;
1376 else
1377 return -EINVAL;
1379 if (epause->rx_pause)
1380 lc->requested_fc |= PAUSE_RX;
1381 if (epause->tx_pause)
1382 lc->requested_fc |= PAUSE_TX;
1383 if (lc->autoneg == AUTONEG_ENABLE) {
1384 if (netif_running(dev))
1385 t3_link_start(&p->phy, &p->mac, lc);
1386 } else {
1387 lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1388 if (netif_running(dev))
1389 t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
1391 return 0;
1394 static u32 get_rx_csum(struct net_device *dev)
1396 struct port_info *p = netdev_priv(dev);
1398 return p->rx_csum_offload;
1401 static int set_rx_csum(struct net_device *dev, u32 data)
1403 struct port_info *p = netdev_priv(dev);
1405 p->rx_csum_offload = data;
1406 return 0;
1409 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1411 const struct adapter *adapter = dev->priv;
1412 const struct port_info *pi = netdev_priv(dev);
1413 const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];
1415 e->rx_max_pending = MAX_RX_BUFFERS;
1416 e->rx_mini_max_pending = 0;
1417 e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
1418 e->tx_max_pending = MAX_TXQ_ENTRIES;
1420 e->rx_pending = q->fl_size;
1421 e->rx_mini_pending = q->rspq_size;
1422 e->rx_jumbo_pending = q->jumbo_size;
1423 e->tx_pending = q->txq_size[0];
1426 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1428 int i;
1429 struct qset_params *q;
1430 struct adapter *adapter = dev->priv;
1431 const struct port_info *pi = netdev_priv(dev);
1433 if (e->rx_pending > MAX_RX_BUFFERS ||
1434 e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
1435 e->tx_pending > MAX_TXQ_ENTRIES ||
1436 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1437 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1438 e->rx_pending < MIN_FL_ENTRIES ||
1439 e->rx_jumbo_pending < MIN_FL_ENTRIES ||
1440 e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
1441 return -EINVAL;
1443 if (adapter->flags & FULL_INIT_DONE)
1444 return -EBUSY;
1446 q = &adapter->params.sge.qset[pi->first_qset];
1447 for (i = 0; i < pi->nqsets; ++i, ++q) {
1448 q->rspq_size = e->rx_mini_pending;
1449 q->fl_size = e->rx_pending;
1450 q->jumbo_size = e->rx_jumbo_pending;
1451 q->txq_size[0] = e->tx_pending;
1452 q->txq_size[1] = e->tx_pending;
1453 q->txq_size[2] = e->tx_pending;
1455 return 0;
1458 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1460 struct adapter *adapter = dev->priv;
1461 struct qset_params *qsp = &adapter->params.sge.qset[0];
1462 struct sge_qset *qs = &adapter->sge.qs[0];
1464 if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
1465 return -EINVAL;
1467 qsp->coalesce_usecs = c->rx_coalesce_usecs;
1468 t3_update_qset_coalesce(qs, qsp);
1469 return 0;
1472 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1474 struct adapter *adapter = dev->priv;
1475 struct qset_params *q = adapter->params.sge.qset;
1477 c->rx_coalesce_usecs = q->coalesce_usecs;
1478 return 0;
1481 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1482 u8 * data)
1484 int i, err = 0;
1485 struct adapter *adapter = dev->priv;
1487 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1488 if (!buf)
1489 return -ENOMEM;
1491 e->magic = EEPROM_MAGIC;
1492 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1493                 err = t3_seeprom_read(adapter, i, (u32 *)&buf[i]);
1495 if (!err)
1496 memcpy(data, buf + e->offset, e->len);
1497 kfree(buf);
1498 return err;
1501 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1502 u8 * data)
1504 u8 *buf;
1505 int err = 0;
1506 u32 aligned_offset, aligned_len, *p;
1507 struct adapter *adapter = dev->priv;
1509 if (eeprom->magic != EEPROM_MAGIC)
1510 return -EINVAL;
1512 aligned_offset = eeprom->offset & ~3;
1513 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1515 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1516 buf = kmalloc(aligned_len, GFP_KERNEL);
1517 if (!buf)
1518 return -ENOMEM;
1519 err = t3_seeprom_read(adapter, aligned_offset, (u32 *) buf);
1520 if (!err && aligned_len > 4)
1521 err = t3_seeprom_read(adapter,
1522 aligned_offset + aligned_len - 4,
1523 (u32 *) & buf[aligned_len - 4]);
1524 if (err)
1525 goto out;
1526 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1527 } else
1528 buf = data;
1530 err = t3_seeprom_wp(adapter, 0);
1531 if (err)
1532 goto out;
1534 for (p = (u32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
1535 err = t3_seeprom_write(adapter, aligned_offset, *p);
1536 aligned_offset += 4;
1539 if (!err)
1540 err = t3_seeprom_wp(adapter, 1);
1541 out:
1542 if (buf != data)
1543 kfree(buf);
1544 return err;
1547 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1549 wol->supported = 0;
1550 wol->wolopts = 0;
1551 memset(&wol->sopass, 0, sizeof(wol->sopass));
1554 static const struct ethtool_ops cxgb_ethtool_ops = {
1555 .get_settings = get_settings,
1556 .set_settings = set_settings,
1557 .get_drvinfo = get_drvinfo,
1558 .get_msglevel = get_msglevel,
1559 .set_msglevel = set_msglevel,
1560 .get_ringparam = get_sge_param,
1561 .set_ringparam = set_sge_param,
1562 .get_coalesce = get_coalesce,
1563 .set_coalesce = set_coalesce,
1564 .get_eeprom_len = get_eeprom_len,
1565 .get_eeprom = get_eeprom,
1566 .set_eeprom = set_eeprom,
1567 .get_pauseparam = get_pauseparam,
1568 .set_pauseparam = set_pauseparam,
1569 .get_rx_csum = get_rx_csum,
1570 .set_rx_csum = set_rx_csum,
1571 .get_tx_csum = ethtool_op_get_tx_csum,
1572 .set_tx_csum = ethtool_op_set_tx_csum,
1573 .get_sg = ethtool_op_get_sg,
1574 .set_sg = ethtool_op_set_sg,
1575 .get_link = ethtool_op_get_link,
1576 .get_strings = get_strings,
1577 .phys_id = cxgb3_phys_id,
1578 .nway_reset = restart_autoneg,
1579 .get_stats_count = get_stats_count,
1580 .get_ethtool_stats = get_stats,
1581 .get_regs_len = get_regs_len,
1582 .get_regs = get_regs,
1583 .get_wol = get_wol,
1584 .get_tso = ethtool_op_get_tso,
1585 .set_tso = ethtool_op_set_tso,
1586 .get_perm_addr = ethtool_op_get_perm_addr
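/*
 * Range check used by the extension ioctls.  Negative values mean
 * "keep the current setting" and always pass.
 */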
1589 static int in_range(int val, int lo, int hi)
1591 return val < 0 || (val <= hi && val >= lo);
1594 static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
1596 int ret;
1597 u32 cmd;
1598 struct adapter *adapter = dev->priv;
1600 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
1601 return -EFAULT;
1603 switch (cmd) {
1604 case CHELSIO_SET_QSET_PARAMS:{
1605 int i;
1606 struct qset_params *q;
1607 struct ch_qset_params t;
1609 if (!capable(CAP_NET_ADMIN))
1610 return -EPERM;
1611 if (copy_from_user(&t, useraddr, sizeof(t)))
1612 return -EFAULT;
1613 if (t.qset_idx >= SGE_QSETS)
1614 return -EINVAL;
1615 if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
1616 !in_range(t.cong_thres, 0, 255) ||
1617 !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
1618 MAX_TXQ_ENTRIES) ||
1619 !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
1620 MAX_TXQ_ENTRIES) ||
1621 !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
1622 MAX_CTRL_TXQ_ENTRIES) ||
1623 !in_range(t.fl_size[0], MIN_FL_ENTRIES,
1624 MAX_RX_BUFFERS)
1625 || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
1626 MAX_RX_JUMBO_BUFFERS)
1627 || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
1628 MAX_RSPQ_ENTRIES))
1629 return -EINVAL;
1630 if ((adapter->flags & FULL_INIT_DONE) &&
1631 (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
1632 t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
1633 t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
1634 t.polling >= 0 || t.cong_thres >= 0))
1635 return -EBUSY;
1637 q = &adapter->params.sge.qset[t.qset_idx];
1639 if (t.rspq_size >= 0)
1640 q->rspq_size = t.rspq_size;
1641 if (t.fl_size[0] >= 0)
1642 q->fl_size = t.fl_size[0];
1643 if (t.fl_size[1] >= 0)
1644 q->jumbo_size = t.fl_size[1];
1645 if (t.txq_size[0] >= 0)
1646 q->txq_size[0] = t.txq_size[0];
1647 if (t.txq_size[1] >= 0)
1648 q->txq_size[1] = t.txq_size[1];
1649 if (t.txq_size[2] >= 0)
1650 q->txq_size[2] = t.txq_size[2];
1651 if (t.cong_thres >= 0)
1652 q->cong_thres = t.cong_thres;
1653 if (t.intr_lat >= 0) {
1654 struct sge_qset *qs =
1655 &adapter->sge.qs[t.qset_idx];
1657 q->coalesce_usecs = t.intr_lat;
1658 t3_update_qset_coalesce(qs, q);
1660 if (t.polling >= 0) {
1661 if (adapter->flags & USING_MSIX)
1662 q->polling = t.polling;
1663 else {
1664 /* No polling with INTx for T3A */
1665 if (adapter->params.rev == 0 &&
1666 !(adapter->flags & USING_MSI))
1667 t.polling = 0;
1669 for (i = 0; i < SGE_QSETS; i++) {
1670 q = &adapter->params.sge.
1671 qset[i];
1672 q->polling = t.polling;
1676 break;
1678 case CHELSIO_GET_QSET_PARAMS:{
1679 struct qset_params *q;
1680 struct ch_qset_params t;
1682 if (copy_from_user(&t, useraddr, sizeof(t)))
1683 return -EFAULT;
1684 if (t.qset_idx >= SGE_QSETS)
1685 return -EINVAL;
1687 q = &adapter->params.sge.qset[t.qset_idx];
1688 t.rspq_size = q->rspq_size;
1689 t.txq_size[0] = q->txq_size[0];
1690 t.txq_size[1] = q->txq_size[1];
1691 t.txq_size[2] = q->txq_size[2];
1692 t.fl_size[0] = q->fl_size;
1693 t.fl_size[1] = q->jumbo_size;
1694 t.polling = q->polling;
1695 t.intr_lat = q->coalesce_usecs;
1696 t.cong_thres = q->cong_thres;
1698 if (copy_to_user(useraddr, &t, sizeof(t)))
1699 return -EFAULT;
1700 break;
1702 case CHELSIO_SET_QSET_NUM:{
1703 struct ch_reg edata;
1704 struct port_info *pi = netdev_priv(dev);
1705 unsigned int i, first_qset = 0, other_qsets = 0;
1707 if (!capable(CAP_NET_ADMIN))
1708 return -EPERM;
1709 if (adapter->flags & FULL_INIT_DONE)
1710 return -EBUSY;
1711 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1712 return -EFAULT;
1713 if (edata.val < 1 ||
1714 (edata.val > 1 && !(adapter->flags & USING_MSIX)))
1715 return -EINVAL;
1717 for_each_port(adapter, i)
1718 if (adapter->port[i] && adapter->port[i] != dev)
1719 other_qsets += adap2pinfo(adapter, i)->nqsets;
1721 if (edata.val + other_qsets > SGE_QSETS)
1722 return -EINVAL;
1724 pi->nqsets = edata.val;
1726 for_each_port(adapter, i)
1727 if (adapter->port[i]) {
1728 pi = adap2pinfo(adapter, i);
1729 pi->first_qset = first_qset;
1730 first_qset += pi->nqsets;
1732 break;
1734 case CHELSIO_GET_QSET_NUM:{
1735 struct ch_reg edata;
1736 struct port_info *pi = netdev_priv(dev);
1738 edata.cmd = CHELSIO_GET_QSET_NUM;
1739 edata.val = pi->nqsets;
1740 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1741 return -EFAULT;
1742 break;
1744 case CHELSIO_LOAD_FW:{
1745 u8 *fw_data;
1746 struct ch_mem_range t;
1748 if (!capable(CAP_NET_ADMIN))
1749 return -EPERM;
1750 if (copy_from_user(&t, useraddr, sizeof(t)))
1751 return -EFAULT;
1753 fw_data = kmalloc(t.len, GFP_KERNEL);
1754 if (!fw_data)
1755 return -ENOMEM;
1757 if (copy_from_user
1758 (fw_data, useraddr + sizeof(t), t.len)) {
1759 kfree(fw_data);
1760 return -EFAULT;
1763 ret = t3_load_fw(adapter, fw_data, t.len);
1764 kfree(fw_data);
1765 if (ret)
1766 return ret;
1767 break;
1769 case CHELSIO_SETMTUTAB:{
1770 struct ch_mtus m;
1771 int i;
1773 if (!is_offload(adapter))
1774 return -EOPNOTSUPP;
1775 if (!capable(CAP_NET_ADMIN))
1776 return -EPERM;
1777 if (offload_running(adapter))
1778 return -EBUSY;
1779 if (copy_from_user(&m, useraddr, sizeof(m)))
1780 return -EFAULT;
1781 if (m.nmtus != NMTUS)
1782 return -EINVAL;
1783 if (m.mtus[0] < 81) /* accommodate SACK */
1784 return -EINVAL;
1786 /* MTUs must be in ascending order */
1787 for (i = 1; i < NMTUS; ++i)
1788 if (m.mtus[i] < m.mtus[i - 1])
1789 return -EINVAL;
1791 memcpy(adapter->params.mtus, m.mtus,
1792 sizeof(adapter->params.mtus));
1793 break;
1795 case CHELSIO_GET_PM:{
1796 struct tp_params *p = &adapter->params.tp;
1797 struct ch_pm m = {.cmd = CHELSIO_GET_PM };
1799 if (!is_offload(adapter))
1800 return -EOPNOTSUPP;
1801 m.tx_pg_sz = p->tx_pg_size;
1802 m.tx_num_pg = p->tx_num_pgs;
1803 m.rx_pg_sz = p->rx_pg_size;
1804 m.rx_num_pg = p->rx_num_pgs;
1805 m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
1806 if (copy_to_user(useraddr, &m, sizeof(m)))
1807 return -EFAULT;
1808 break;
1810 case CHELSIO_SET_PM:{
1811 struct ch_pm m;
1812 struct tp_params *p = &adapter->params.tp;
1814 if (!is_offload(adapter))
1815 return -EOPNOTSUPP;
1816 if (!capable(CAP_NET_ADMIN))
1817 return -EPERM;
1818 if (adapter->flags & FULL_INIT_DONE)
1819 return -EBUSY;
1820 if (copy_from_user(&m, useraddr, sizeof(m)))
1821 return -EFAULT;
1822 if (!is_power_of_2(m.rx_pg_sz) ||
1823 !is_power_of_2(m.tx_pg_sz))
1824 return -EINVAL; /* not power of 2 */
1825 if (!(m.rx_pg_sz & 0x14000))
1826 return -EINVAL; /* not 16KB or 64KB */
1827 if (!(m.tx_pg_sz & 0x1554000))
1828 return -EINVAL;
1829 if (m.tx_num_pg == -1)
1830 m.tx_num_pg = p->tx_num_pgs;
1831 if (m.rx_num_pg == -1)
1832 m.rx_num_pg = p->rx_num_pgs;
1833 if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
1834 return -EINVAL;
1835 if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
1836 m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
1837 return -EINVAL;
1838 p->rx_pg_size = m.rx_pg_sz;
1839 p->tx_pg_size = m.tx_pg_sz;
1840 p->rx_num_pgs = m.rx_num_pg;
1841 p->tx_num_pgs = m.tx_num_pg;
1842 break;
1844 case CHELSIO_GET_MEM:{
1845 struct ch_mem_range t;
1846 struct mc7 *mem;
1847 u64 buf[32];
1849 if (!is_offload(adapter))
1850 return -EOPNOTSUPP;
1851 if (!(adapter->flags & FULL_INIT_DONE))
1852 return -EIO; /* need the memory controllers */
1853 if (copy_from_user(&t, useraddr, sizeof(t)))
1854 return -EFAULT;
1855 if ((t.addr & 7) || (t.len & 7))
1856 return -EINVAL;
1857 if (t.mem_id == MEM_CM)
1858 mem = &adapter->cm;
1859 else if (t.mem_id == MEM_PMRX)
1860 mem = &adapter->pmrx;
1861 else if (t.mem_id == MEM_PMTX)
1862 mem = &adapter->pmtx;
1863 else
1864 return -EINVAL;
1867 * Version scheme:
1868 * bits 0..9: chip version
1869 * bits 10..15: chip revision
1871 t.version = 3 | (adapter->params.rev << 10);
1872 if (copy_to_user(useraddr, &t, sizeof(t)))
1873 return -EFAULT;
1876 * Read 256 bytes at a time as len can be large and we don't
1877 * want to use huge intermediate buffers.
1879 useraddr += sizeof(t); /* advance to start of buffer */
1880 while (t.len) {
1881 unsigned int chunk =
1882 min_t(unsigned int, t.len, sizeof(buf));
1884 ret =
1885 t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
1886 buf);
1887 if (ret)
1888 return ret;
1889 if (copy_to_user(useraddr, buf, chunk))
1890 return -EFAULT;
1891 useraddr += chunk;
1892 t.addr += chunk;
1893 t.len -= chunk;
1895 break;
1897 case CHELSIO_SET_TRACE_FILTER:{
1898 struct ch_trace t;
1899 const struct trace_params *tp;
1901 if (!capable(CAP_NET_ADMIN))
1902 return -EPERM;
1903 if (!offload_running(adapter))
1904 return -EAGAIN;
1905 if (copy_from_user(&t, useraddr, sizeof(t)))
1906 return -EFAULT;
1908 tp = (const struct trace_params *)&t.sip;
1909 if (t.config_tx)
1910 t3_config_trace_filter(adapter, tp, 0,
1911 t.invert_match,
1912 t.trace_tx);
1913 if (t.config_rx)
1914 t3_config_trace_filter(adapter, tp, 1,
1915 t.invert_match,
1916 t.trace_rx);
1917 break;
1919 default:
1920 return -EOPNOTSUPP;
1922 return 0;
1925 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1927 int ret, mmd;
1928 struct adapter *adapter = dev->priv;
1929 struct port_info *pi = netdev_priv(dev);
1930 struct mii_ioctl_data *data = if_mii(req);
1932 switch (cmd) {
1933 case SIOCGMIIPHY:
1934 data->phy_id = pi->phy.addr;
1935 /* FALLTHRU */
1936 case SIOCGMIIREG:{
1937 u32 val;
1938 struct cphy *phy = &pi->phy;
1940 if (!phy->mdio_read)
1941 return -EOPNOTSUPP;
1942 if (is_10G(adapter)) {
1943 mmd = data->phy_id >> 8;
1944 if (!mmd)
1945 mmd = MDIO_DEV_PCS;
1946 else if (mmd > MDIO_DEV_XGXS)
1947 return -EINVAL;
1949 ret =
1950 phy->mdio_read(adapter, data->phy_id & 0x1f,
1951 mmd, data->reg_num, &val);
1952 } else
1953 ret =
1954 phy->mdio_read(adapter, data->phy_id & 0x1f,
1955 0, data->reg_num & 0x1f,
1956 &val);
1957 if (!ret)
1958 data->val_out = val;
1959 break;
1961 case SIOCSMIIREG:{
1962 struct cphy *phy = &pi->phy;
1964 if (!capable(CAP_NET_ADMIN))
1965 return -EPERM;
1966 if (!phy->mdio_write)
1967 return -EOPNOTSUPP;
1968 if (is_10G(adapter)) {
1969 mmd = data->phy_id >> 8;
1970 if (!mmd)
1971 mmd = MDIO_DEV_PCS;
1972 else if (mmd > MDIO_DEV_XGXS)
1973 return -EINVAL;
1975 ret =
1976 phy->mdio_write(adapter,
1977 data->phy_id & 0x1f, mmd,
1978 data->reg_num,
1979 data->val_in);
1980 } else
1981 ret =
1982 phy->mdio_write(adapter,
1983 data->phy_id & 0x1f, 0,
1984 data->reg_num & 0x1f,
1985 data->val_in);
1986 break;
1988 case SIOCCHIOCTL:
1989 return cxgb_extension_ioctl(dev, req->ifr_data);
1990 default:
1991 return -EOPNOTSUPP;
1993 return ret;
1996 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
1998 int ret;
1999 struct adapter *adapter = dev->priv;
2000 struct port_info *pi = netdev_priv(dev);
2002 if (new_mtu < 81) /* accommodate SACK */
2003 return -EINVAL;
2004 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
2005 return ret;
2006 dev->mtu = new_mtu;
2007 init_port_mtus(adapter);
2008 if (adapter->params.rev == 0 && offload_running(adapter))
2009 t3_load_mtus(adapter, adapter->params.mtus,
2010 adapter->params.a_wnd, adapter->params.b_wnd,
2011 adapter->port[0]->mtu);
2012 return 0;
2015 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2017 struct adapter *adapter = dev->priv;
2018 struct port_info *pi = netdev_priv(dev);
2019 struct sockaddr *addr = p;
2021 if (!is_valid_ether_addr(addr->sa_data))
2022 return -EINVAL;
2024 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2025 t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
2026 if (offload_running(adapter))
2027 write_smt_entry(adapter, pi->port_id);
2028 return 0;
2032 * t3_synchronize_rx - wait for current Rx processing on a port to complete
2033 * @adap: the adapter
2034 * @p: the port
2036 * Ensures that current Rx processing on any of the queues associated with
2037 * the given port completes before returning. We do this by acquiring and
2038 * releasing the locks of the response queues associated with the port.
2040 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
2042 int i;
2044 for (i = 0; i < p->nqsets; i++) {
2045 struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;
2047 spin_lock_irq(&q->lock);
2048 spin_unlock_irq(&q->lock);
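/*
 * Attach a VLAN group to a port and update HW VLAN extraction.  T3 rev 0
 * has a single VLAN acceleration control shared by all ports.
 */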
2052 static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2054 struct adapter *adapter = dev->priv;
2055 struct port_info *pi = netdev_priv(dev);
2057 pi->vlan_grp = grp;
2058 if (adapter->params.rev > 0)
2059 t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
2060 else {
2061 /* single control for all ports */
2062 unsigned int i, have_vlans = 0;
2063 for_each_port(adapter, i)
2064 have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;
2066 t3_set_vlan_accel(adapter, 1, have_vlans);
2068 t3_synchronize_rx(adapter, pi);
2071 #ifdef CONFIG_NET_POLL_CONTROLLER
2072 static void cxgb_netpoll(struct net_device *dev)
2074 struct adapter *adapter = dev->priv;
2075 struct port_info *pi = netdev_priv(dev);
2076 int qidx;
2078 for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
2079 struct sge_qset *qs = &adapter->sge.qs[qidx];
2080 void *source;
2082 if (adapter->flags & USING_MSIX)
2083 source = qs;
2084 else
2085 source = adapter;
2087 t3_intr_handler(adapter, qs->rspq.polling) (0, source);
2090 #endif
2092 #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
2093 int update_tpsram(struct adapter *adap)
2094 {
2095 const struct firmware *tpsram;
2096 char buf[64];
2097 struct device *dev = &adap->pdev->dev;
2098 int ret;
2099 char rev;
2101 rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
2103 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
2104 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
2106 ret = request_firmware(&tpsram, buf, dev);
2107 if (ret < 0) {
2108 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
2109 buf);
2110 return ret;
2111 }
2113 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
2114 if (ret)
2115 goto release_tpsram;
2117 ret = t3_set_proto_sram(adap, tpsram->data);
2118 if (ret)
2119 dev_err(dev, "loading protocol SRAM failed\n");
2121 release_tpsram:
2122 release_firmware(tpsram);
2124 return ret;
2125 }
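update_tpsram() builds the firmware file name from the silicon revision ('a' or 'b') and the TP microcode version compiled into the driver, then hands it to request_firmware(). Below is a tiny stand-alone demonstration of how TPSRAM_NAME expands; the version numbers here are placeholders, not the TP_VERSION_MAJOR/MINOR/MICRO values defined elsewhere in this tree.

/* Illustrative only: prints the file name request_firmware() would look for. */
#include <stdio.h>

int main(void)
{
        char buf[64];

        snprintf(buf, sizeof(buf), "t3%c_protocol_sram-%d.%d.%d.bin",
                 'b', 1, 1, 0);
        printf("%s\n", buf);    /* t3b_protocol_sram-1.1.0.bin */
        return 0;
}

The user-space firmware helper then typically resolves that name under /lib/firmware.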
2128 /*
2129 * Periodic accumulation of MAC statistics.
2130 */
2131 static void mac_stats_update(struct adapter *adapter)
2132 {
2133 int i;
2135 for_each_port(adapter, i) {
2136 struct net_device *dev = adapter->port[i];
2137 struct port_info *p = netdev_priv(dev);
2139 if (netif_running(dev)) {
2140 spin_lock(&adapter->stats_lock);
2141 t3_mac_update_stats(&p->mac);
2142 spin_unlock(&adapter->stats_lock);
2143 }
2144 }
2145 }
2147 static void check_link_status(struct adapter *adapter)
2148 {
2149 int i;
2151 for_each_port(adapter, i) {
2152 struct net_device *dev = adapter->port[i];
2153 struct port_info *p = netdev_priv(dev);
2155 if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
2156 t3_link_changed(adapter, i);
2157 }
2158 }
2160 static void check_t3b2_mac(struct adapter *adapter)
2161 {
2162 int i;
2164 if (!rtnl_trylock()) /* synchronize with ifdown */
2165 return;
2167 for_each_port(adapter, i) {
2168 struct net_device *dev = adapter->port[i];
2169 struct port_info *p = netdev_priv(dev);
2170 int status;
2172 if (!netif_running(dev))
2173 continue;
2175 status = 0;
2176 if (netif_running(dev) && netif_carrier_ok(dev))
2177 status = t3b2_mac_watchdog_task(&p->mac);
2178 if (status == 1)
2179 p->mac.stats.num_toggled++;
2180 else if (status == 2) {
2181 struct cmac *mac = &p->mac;
2183 t3_mac_set_mtu(mac, dev->mtu);
2184 t3_mac_set_address(mac, 0, dev->dev_addr);
2185 cxgb_set_rxmode(dev);
2186 t3_link_start(&p->phy, mac, &p->link_config);
2187 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
2188 t3_port_intr_enable(adapter, p->port_id);
2189 p->mac.stats.num_resets++;
2190 }
2191 }
2192 rtnl_unlock();
2193 }
2196 static void t3_adap_check_task(struct work_struct *work)
2197 {
2198 struct adapter *adapter = container_of(work, struct adapter,
2199 adap_check_task.work);
2200 const struct adapter_params *p = &adapter->params;
2202 adapter->check_task_cnt++;
2204 /* Check link status for PHYs without interrupts */
2205 if (p->linkpoll_period)
2206 check_link_status(adapter);
2208 /* Accumulate MAC stats if needed */
2209 if (!p->linkpoll_period ||
2210 (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
2211 p->stats_update_period) {
2212 mac_stats_update(adapter);
2213 adapter->check_task_cnt = 0;
2214 }
2216 if (p->rev == T3_REV_B2)
2217 check_t3b2_mac(adapter);
2219 /* Schedule the next check update if any port is active. */
2220 spin_lock(&adapter->work_lock);
2221 if (adapter->open_device_map & PORT_MASK)
2222 schedule_chk_task(adapter);
2223 spin_unlock(&adapter->work_lock);
2224 }
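As a worked example of the accumulation condition above: assuming linkpoll_period is expressed in tenths of a second and stats_update_period in seconds (which the division by 10 suggests), a port polled once a second (linkpoll_period = 10) with stats_update_period = 60 satisfies check_task_cnt * 10 / 10 >= 60 on the 60th run, so the MAC counters are folded into the software statistics roughly once a minute; when linkpoll_period is 0 the first half of the condition makes the statistics accumulate on every invocation.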
2226 /*
2227 * Processes external (PHY) interrupts in process context.
2228 */
2229 static void ext_intr_task(struct work_struct *work)
2230 {
2231 struct adapter *adapter = container_of(work, struct adapter,
2232 ext_intr_handler_task);
2234 t3_phy_intr_handler(adapter);
2236 /* Now reenable external interrupts */
2237 spin_lock_irq(&adapter->work_lock);
2238 if (adapter->slow_intr_mask) {
2239 adapter->slow_intr_mask |= F_T3DBG;
2240 t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
2241 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2242 adapter->slow_intr_mask);
2243 }
2244 spin_unlock_irq(&adapter->work_lock);
2245 }
2247 /*
2248 * Interrupt-context handler for external (PHY) interrupts.
2249 */
2250 void t3_os_ext_intr_handler(struct adapter *adapter)
2251 {
2252 /*
2253 * Schedule a task to handle external interrupts as they may be slow
2254 * and we use a mutex to protect MDIO registers. We disable PHY
2255 * interrupts in the meantime and let the task reenable them when
2256 * it's done.
2257 */
2258 spin_lock(&adapter->work_lock);
2259 if (adapter->slow_intr_mask) {
2260 adapter->slow_intr_mask &= ~F_T3DBG;
2261 t3_write_reg(adapter, A_PL_INT_ENABLE0,
2262 adapter->slow_intr_mask);
2263 queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
2264 }
2265 spin_unlock(&adapter->work_lock);
2266 }
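t3_os_ext_intr_handler() and ext_intr_task() above form a common pair: the interrupt-context half masks the slow (PHY) source and queues work, and the process-context half does the sleepable MDIO handling and unmasks the source again. A generic, hedged sketch of that shape follows; example_dev and its helpers are made-up names, not part of this driver.

/* Illustrative only -- the shape of the defer-to-workqueue pattern above.
 * slow_work must be set up with INIT_WORK(&d->slow_work, example_slow_work_fn). */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define EXAMPLE_SLOW_BIT 0x1

struct example_dev {
        spinlock_t lock;
        u32 intr_mask;                  /* slow sources currently enabled */
        struct work_struct slow_work;
};

static void example_write_intr_enable(struct example_dev *d) { /* MMIO write */ }
static void example_handle_slow_event(struct example_dev *d) { /* may sleep */ }

/* hard-IRQ context: mask the source and punt to process context */
static void example_irq_path(struct example_dev *d)
{
        spin_lock(&d->lock);
        d->intr_mask &= ~EXAMPLE_SLOW_BIT;
        example_write_intr_enable(d);
        schedule_work(&d->slow_work);
        spin_unlock(&d->lock);
}

/* process context: do the slow work, then unmask the source again */
static void example_slow_work_fn(struct work_struct *work)
{
        struct example_dev *d = container_of(work, struct example_dev, slow_work);

        example_handle_slow_event(d);

        spin_lock_irq(&d->lock);
        d->intr_mask |= EXAMPLE_SLOW_BIT;
        example_write_intr_enable(d);
        spin_unlock_irq(&d->lock);
}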
2268 void t3_fatal_err(struct adapter *adapter)
2269 {
2270 unsigned int fw_status[4];
2272 if (adapter->flags & FULL_INIT_DONE) {
2273 t3_sge_stop(adapter);
2274 t3_intr_disable(adapter);
2275 }
2276 CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
2277 if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
2278 CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
2279 fw_status[0], fw_status[1],
2280 fw_status[2], fw_status[3]);
2281 }
2284 static int __devinit cxgb_enable_msix(struct adapter *adap)
2285 {
2286 struct msix_entry entries[SGE_QSETS + 1];
2287 int i, err;
2289 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2290 entries[i].entry = i;
2292 err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
2293 if (!err) {
2294 for (i = 0; i < ARRAY_SIZE(entries); ++i)
2295 adap->msix_info[i].vec = entries[i].vector;
2296 } else if (err > 0)
2297 dev_info(&adap->pdev->dev,
2298 "only %d MSI-X vectors left, not using MSI-X\n", err);
2299 return err;
2300 }
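In this kernel generation pci_enable_msix() returns 0 on success, a negative errno on failure, and a positive count when fewer vectors than requested are available; cxgb_enable_msix() treats the positive case as a cue to fall back to MSI or INTx. Some drivers retry with the smaller count instead; a hedged sketch of that alternative follows (example_enable_msix is a made-up name, not cxgb3 code).

#include <linux/pci.h>

/* Illustrative only: retry pci_enable_msix() with however many vectors it
 * reports as available, rather than falling back the way the code above does. */
static int example_enable_msix(struct pci_dev *pdev,
                               struct msix_entry *entries, int nvec)
{
        int err;

        while ((err = pci_enable_msix(pdev, entries, nvec)) > 0)
                nvec = err;             /* only 'err' vectors available */

        return err < 0 ? err : nvec;    /* error, or vectors actually granted */
}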
2302 static void __devinit print_port_info(struct adapter *adap,
2303 const struct adapter_info *ai)
2304 {
2305 static const char *pci_variant[] = {
2306 "PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
2309 int i;
2310 char buf[80];
2312 if (is_pcie(adap))
2313 snprintf(buf, sizeof(buf), "%s x%d",
2314 pci_variant[adap->params.pci.variant],
2315 adap->params.pci.width);
2316 else
2317 snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
2318 pci_variant[adap->params.pci.variant],
2319 adap->params.pci.speed, adap->params.pci.width);
2321 for_each_port(adap, i) {
2322 struct net_device *dev = adap->port[i];
2323 const struct port_info *pi = netdev_priv(dev);
2325 if (!test_bit(i, &adap->registered_device_map))
2326 continue;
2327 printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
2328 dev->name, ai->desc, pi->port_type->desc,
2329 is_offload(adap) ? "R" : "", adap->params.rev, buf,
2330 (adap->flags & USING_MSIX) ? " MSI-X" :
2331 (adap->flags & USING_MSI) ? " MSI" : "");
2332 if (adap->name == dev->name && adap->params.vpd.mclk)
2333 printk(KERN_INFO "%s: %uMB CM, %uMB PMTX, %uMB PMRX\n",
2334 adap->name, t3_mc7_size(&adap->cm) >> 20,
2335 t3_mc7_size(&adap->pmtx) >> 20,
2336 t3_mc7_size(&adap->pmrx) >> 20);
2337 }
2338 }
2340 static int __devinit init_one(struct pci_dev *pdev,
2341 const struct pci_device_id *ent)
2342 {
2343 static int version_printed;
2345 int i, err, pci_using_dac = 0;
2346 unsigned long mmio_start, mmio_len;
2347 const struct adapter_info *ai;
2348 struct adapter *adapter = NULL;
2349 struct port_info *pi;
2351 if (!version_printed) {
2352 printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
2353 ++version_printed;
2354 }
2356 if (!cxgb3_wq) {
2357 cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
2358 if (!cxgb3_wq) {
2359 printk(KERN_ERR DRV_NAME
2360 ": cannot initialize work queue\n");
2361 return -ENOMEM;
2362 }
2363 }
2365 err = pci_request_regions(pdev, DRV_NAME);
2366 if (err) {
2367 /* Just info, some other driver may have claimed the device. */
2368 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
2369 return err;
2370 }
2372 err = pci_enable_device(pdev);
2373 if (err) {
2374 dev_err(&pdev->dev, "cannot enable PCI device\n");
2375 goto out_release_regions;
2376 }
2378 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2379 pci_using_dac = 1;
2380 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2381 if (err) {
2382 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
2383 "coherent allocations\n");
2384 goto out_disable_device;
2385 }
2386 } else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
2387 dev_err(&pdev->dev, "no usable DMA configuration\n");
2388 goto out_disable_device;
2389 }
2391 pci_set_master(pdev);
2393 mmio_start = pci_resource_start(pdev, 0);
2394 mmio_len = pci_resource_len(pdev, 0);
2395 ai = t3_get_adapter_info(ent->driver_data);
2397 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2398 if (!adapter) {
2399 err = -ENOMEM;
2400 goto out_disable_device;
2401 }
2403 adapter->regs = ioremap_nocache(mmio_start, mmio_len);
2404 if (!adapter->regs) {
2405 dev_err(&pdev->dev, "cannot map device registers\n");
2406 err = -ENOMEM;
2407 goto out_free_adapter;
2408 }
2410 adapter->pdev = pdev;
2411 adapter->name = pci_name(pdev);
2412 adapter->msg_enable = dflt_msg_enable;
2413 adapter->mmio_len = mmio_len;
2415 mutex_init(&adapter->mdio_lock);
2416 spin_lock_init(&adapter->work_lock);
2417 spin_lock_init(&adapter->stats_lock);
2419 INIT_LIST_HEAD(&adapter->adapter_list);
2420 INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
2421 INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
2423 for (i = 0; i < ai->nports; ++i) {
2424 struct net_device *netdev;
2426 netdev = alloc_etherdev(sizeof(struct port_info));
2427 if (!netdev) {
2428 err = -ENOMEM;
2429 goto out_free_dev;
2430 }
2432 SET_MODULE_OWNER(netdev);
2433 SET_NETDEV_DEV(netdev, &pdev->dev);
2435 adapter->port[i] = netdev;
2436 pi = netdev_priv(netdev);
2437 pi->rx_csum_offload = 1;
2438 pi->nqsets = 1;
2439 pi->first_qset = i;
2440 pi->activity = 0;
2441 pi->port_id = i;
2442 netif_carrier_off(netdev);
2443 netdev->irq = pdev->irq;
2444 netdev->mem_start = mmio_start;
2445 netdev->mem_end = mmio_start + mmio_len - 1;
2446 netdev->priv = adapter;
2447 netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
2448 netdev->features |= NETIF_F_LLTX;
2449 if (pci_using_dac)
2450 netdev->features |= NETIF_F_HIGHDMA;
2452 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2453 netdev->vlan_rx_register = vlan_rx_register;
2455 netdev->open = cxgb_open;
2456 netdev->stop = cxgb_close;
2457 netdev->hard_start_xmit = t3_eth_xmit;
2458 netdev->get_stats = cxgb_get_stats;
2459 netdev->set_multicast_list = cxgb_set_rxmode;
2460 netdev->do_ioctl = cxgb_ioctl;
2461 netdev->change_mtu = cxgb_change_mtu;
2462 netdev->set_mac_address = cxgb_set_mac_addr;
2463 #ifdef CONFIG_NET_POLL_CONTROLLER
2464 netdev->poll_controller = cxgb_netpoll;
2465 #endif
2466 netdev->weight = 64;
2468 SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
2469 }
2471 pci_set_drvdata(pdev, adapter->port[0]);
2472 if (t3_prep_adapter(adapter, ai, 1) < 0) {
2473 err = -ENODEV;
2474 goto out_free_dev;
2475 }
2477 err = t3_check_tpsram_version(adapter);
2478 if (err == -EINVAL)
2479 err = update_tpsram(adapter);
2481 if (err)
2482 goto out_free_dev;
2484 /*
2485 * The card is now ready to go. If any errors occur during device
2486 * registration we do not fail the whole card but rather proceed only
2487 * with the ports we manage to register successfully. However we must
2488 * register at least one net device.
2489 */
2490 for_each_port(adapter, i) {
2491 err = register_netdev(adapter->port[i]);
2492 if (err)
2493 dev_warn(&pdev->dev,
2494 "cannot register net device %s, skipping\n",
2495 adapter->port[i]->name);
2496 else {
2497 /*
2498 * Change the name we use for messages to the name of
2499 * the first successfully registered interface.
2500 */
2501 if (!adapter->registered_device_map)
2502 adapter->name = adapter->port[i]->name;
2504 __set_bit(i, &adapter->registered_device_map);
2505 }
2506 }
2507 if (!adapter->registered_device_map) {
2508 dev_err(&pdev->dev, "could not register any net devices\n");
2509 goto out_free_dev;
2510 }
2512 /* Driver's ready. Reflect it on LEDs */
2513 t3_led_ready(adapter);
2515 if (is_offload(adapter)) {
2516 __set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
2517 cxgb3_adapter_ofld(adapter);
2518 }
2520 /* See what interrupts we'll be using */
2521 if (msi > 1 && cxgb_enable_msix(adapter) == 0)
2522 adapter->flags |= USING_MSIX;
2523 else if (msi > 0 && pci_enable_msi(pdev) == 0)
2524 adapter->flags |= USING_MSI;
2526 err = sysfs_create_group(&adapter->port[0]->dev.kobj,
2527 &cxgb3_attr_group);
2529 print_port_info(adapter, ai);
2530 return 0;
2532 out_free_dev:
2533 iounmap(adapter->regs);
2534 for (i = ai->nports - 1; i >= 0; --i)
2535 if (adapter->port[i])
2536 free_netdev(adapter->port[i]);
2538 out_free_adapter:
2539 kfree(adapter);
2541 out_disable_device:
2542 pci_disable_device(pdev);
2543 out_release_regions:
2544 pci_release_regions(pdev);
2545 pci_set_drvdata(pdev, NULL);
2546 return err;
2547 }
2549 static void __devexit remove_one(struct pci_dev *pdev)
2550 {
2551 struct net_device *dev = pci_get_drvdata(pdev);
2553 if (dev) {
2554 int i;
2555 struct adapter *adapter = dev->priv;
2557 t3_sge_stop(adapter);
2558 sysfs_remove_group(&adapter->port[0]->dev.kobj,
2559 &cxgb3_attr_group);
2561 for_each_port(adapter, i)
2562 if (test_bit(i, &adapter->registered_device_map))
2563 unregister_netdev(adapter->port[i]);
2565 if (is_offload(adapter)) {
2566 cxgb3_adapter_unofld(adapter);
2567 if (test_bit(OFFLOAD_DEVMAP_BIT,
2568 &adapter->open_device_map))
2569 offload_close(&adapter->tdev);
2570 }
2572 t3_free_sge_resources(adapter);
2573 cxgb_disable_msi(adapter);
2575 for (i = 0; i < ARRAY_SIZE(adapter->dummy_netdev); i++)
2576 if (adapter->dummy_netdev[i]) {
2577 free_netdev(adapter->dummy_netdev[i]);
2578 adapter->dummy_netdev[i] = NULL;
2579 }
2581 for_each_port(adapter, i)
2582 if (adapter->port[i])
2583 free_netdev(adapter->port[i]);
2585 iounmap(adapter->regs);
2586 kfree(adapter);
2587 pci_release_regions(pdev);
2588 pci_disable_device(pdev);
2589 pci_set_drvdata(pdev, NULL);
2590 }
2591 }
2593 static struct pci_driver driver = {
2594 .name = DRV_NAME,
2595 .id_table = cxgb3_pci_tbl,
2596 .probe = init_one,
2597 .remove = __devexit_p(remove_one),
2598 };
2600 static int __init cxgb3_init_module(void)
2601 {
2602 int ret;
2604 cxgb3_offload_init();
2606 ret = pci_register_driver(&driver);
2607 return ret;
2608 }
2610 static void __exit cxgb3_cleanup_module(void)
2611 {
2612 pci_unregister_driver(&driver);
2613 if (cxgb3_wq)
2614 destroy_workqueue(cxgb3_wq);
2615 }
2617 module_init(cxgb3_init_module);
2618 module_exit(cxgb3_cleanup_module);