/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
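/*
 * Editor's note: a worked example of the macro above (not part of the
 * original source).  CH_DEVICE(0x20, 0) expands to a pci_device_id
 * initializer:
 *
 *	{ PCI_VENDOR_ID_CHELSIO, 0x20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
 *
 * i.e. it matches device ID 0x20 from Chelsio's PCI vendor ID with any
 * subsystem IDs, and stores the board index (here 0) in driver_data.
 */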
static const struct pci_device_id cxgb3_pci_tbl[] = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	{0,}
};

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");

/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
static struct workqueue_struct *cxgb3_wq;
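/*
 * Editor's sketch of the pattern this solves (illustrative, not part of the
 * original source).  Work queued on a private workqueue runs on that queue's
 * own thread, so it cannot be stuck behind linkwatch on keventd:
 *
 *	cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
 *	...
 *	queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
 *	...
 *	destroy_workqueue(cxgb3_wq);	\/\* at module unload \*\/
 *
 * The queue_work() call above is the one this driver actually makes in
 * t3_os_ext_intr_handler() further down.
 */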
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			t3_mac_enable(mac, MAC_DIRECTION_RX);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
			pi->phy.ops->power_down(&pi->phy, 1);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);
		}

		link_report(dev);
	}
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_set_rx_mode(&pi->mac, &rm);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct t3_rx_mode rm;
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	init_rx_mode(&rm, dev, dev->mc_list);
	t3_mac_reset(mac);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, 0, dev->dev_addr);
	t3_mac_set_rx_mode(mac, &rm);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s (queue %d)", d->name, i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}

static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->iff = i;
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL | __GFP_NOFAIL);
	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	t3_tp_set_offload_mode(adap, 0);
	return i;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
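/*
 * Editor's worked example of the mapping built above (not part of the
 * original source).  With nq0 = 2 queue sets on port 0 and nq1 = 2 on
 * port 1, the first half of rspq_map alternates 0,1,0,1,... and the second
 * half alternates 2,3,2,3,..., so hash values indexing the lower half land
 * on port 0's response queues and the upper half on port 1's.
 */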
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}
}

/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}

static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev);
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
static ssize_t attr_show(struct device *d, char *buf,
			 ssize_t(*format) (struct net_device *, char *))
{
	ssize_t len;

	/* Synchronize with ioctls that may shut down the device */
	rtnl_lock();
	len = (*format) (to_net_dev(d), buf);
	rtnl_unlock();
	return len;
}

static ssize_t attr_store(struct device *d,
			  const char *buf, size_t len,
			  ssize_t(*set) (struct net_device *, unsigned int),
			  unsigned int min_val, unsigned int max_val)
{
	char *endp;
	ssize_t ret;
	unsigned int val;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val < min_val || val > max_val)
		return -EINVAL;

	rtnl_lock();
	ret = (*set) (to_net_dev(d), val);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define CXGB3_SHOW(name, val_expr) \
static ssize_t format_##name(struct net_device *dev, char *buf) \
{ \
	struct port_info *pi = netdev_priv(dev); \
	struct adapter *adap = pi->adapter; \
	return sprintf(buf, "%u\n", val_expr); \
} \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return attr_show(d, buf, format_##name); \
}
static ssize_t set_nfilters(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val && adap->params.rev == 0)
		return -EINVAL;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    min_tids)
		return -EINVAL;
	adap->params.mc5.nfilters = val;
	return 0;
}

static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nfilters, 0, ~0);
}

static ssize_t set_nservers(struct net_device *dev, unsigned int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & FULL_INIT_DONE)
		return -EBUSY;
	if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters -
	    MC5_MIN_TIDS)
		return -EINVAL;
	adap->params.mc5.nservers = val;
	return 0;
}

static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return attr_store(d, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)

CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);

static struct attribute *cxgb3_attrs[] = {
	&dev_attr_cam_size.attr,
	&dev_attr_nfilters.attr,
	&dev_attr_nservers.attr,
	NULL
};

static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
static ssize_t tm_attr_show(struct device *d,
			    char *buf, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int v, addr, bpt, cpt;
	ssize_t len;

	addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
	rtnl_lock();		/* synchronize with tm_attr_store */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v >>= 16;
	bpt = (v >> 8) & 0xff;
	cpt = v & 0xff;
	if (!cpt)
		len = sprintf(buf, "disabled\n");
	else {
		v = (adap->params.vpd.cclk * 1000) / cpt;
		len = sprintf(buf, "%u Kbps\n", (v * bpt) / 125);
	}
	rtnl_unlock();
	return len;
}

static ssize_t tm_attr_store(struct device *d,
			     const char *buf, size_t len, int sched)
{
	struct port_info *pi = netdev_priv(to_net_dev(d));
	struct adapter *adap = pi->adapter;
	unsigned int val;
	char *endp;
	ssize_t ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || val > 10000000)
		return -EINVAL;

	rtnl_lock();
	ret = t3_config_sched(adap, val, sched);
	if (!ret)
		ret = len;
	rtnl_unlock();
	return ret;
}
#define TM_ATTR(name, sched) \
static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
			   char *buf) \
{ \
	return tm_attr_show(d, buf, sched); \
} \
static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
			    const char *buf, size_t len) \
{ \
	return tm_attr_store(d, buf, len, sched); \
} \
static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)

TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
TM_ATTR(sched2, 2);
TM_ATTR(sched3, 3);
TM_ATTR(sched4, 4);
TM_ATTR(sched5, 5);
TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
	&dev_attr_sched0.attr,
	&dev_attr_sched1.attr,
	&dev_attr_sched2.attr,
	&dev_attr_sched3.attr,
	&dev_attr_sched4.attr,
	&dev_attr_sched5.attr,
	&dev_attr_sched6.attr,
	&dev_attr_sched7.attr,
	NULL
};

static struct attribute_group offload_attr_group = {.attrs = offload_attrs };
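/*
 * Editor's note (usage sketch, not part of the original source): once
 * registered, these attribute groups appear in the netdev's sysfs directory,
 * e.g. for a hypothetical interface eth0:
 *
 *	cat /sys/class/net/eth0/cam_size
 *	echo 16384 > /sys/class/net/eth0/nfilters	# needs CAP_NET_ADMIN
 *	echo 50000 > /sys/class/net/eth0/sched0		# Tx scheduler 0, Kbps
 *
 * Values are validated by the attr_store()/tm_attr_store() handlers above.
 */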
/*
 * Sends an sk_buff to an offload queue driver
 * after dealing with any active network taps.
 */
static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = t3_offload_tx(tdev, skb);
	local_bh_enable();
	return ret;
}

static int write_smt_entry(struct adapter *adapter, int idx)
{
	struct cpl_smt_write_req *req;
	struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);

	if (!skb)
		return -ENOMEM;

	req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
	req->mtu_idx = NMTUS - 1;	/* should be 0 but there's a T3 bug */
	req->iff = idx;
	memset(req->src_mac1, 0, sizeof(req->src_mac1));
	memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
	skb->priority = 1;
	offload_tx(&adapter->tdev, skb);
	return 0;
}

static int init_smt(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i)
	    write_smt_entry(adapter, i);
	return 0;
}
static void init_port_mtus(struct adapter *adapter)
{
	unsigned int mtus = adapter->port[0]->mtu;

	if (adapter->port[1])
		mtus |= adapter->port[1]->mtu << 16;
	t3_write_reg(adapter, A_TP_MTU_PORT_TABLE, mtus);
}

static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo,
			      int hi, int port)
{
	struct sk_buff *skb;
	struct mngt_pktsched_wr *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL | __GFP_NOFAIL);
	req = (struct mngt_pktsched_wr *)skb_put(skb, sizeof(*req));
	req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT));
	req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET;
	req->sched = sched;
	req->idx = qidx;
	req->min = lo;
	req->max = hi;
	req->binding = port;
	t3_mgmt_tx(adap, skb);
}

static void bind_qsets(struct adapter *adap)
{
	int i, j;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		for (j = 0; j < pi->nqsets; ++j)
			send_pktsched_cmd(adap, 1, pi->first_qset + j, -1,
					  -1, i);
	}
}
#define FW_FNAME "t3fw-%d.%d.%d.bin"
#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
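/*
 * Editor's note: with the FW_VERSION_* / TP_VERSION_* macros from the
 * firmware headers spliced in, the format strings above yield request names
 * of the form "t3fw-X.Y.Z.bin" and "t3b_protocol_sram-X.Y.Z.bin"; the
 * 'b'/'c' letter comes from t3rev2char() below.  The actual X.Y.Z numbers
 * are not shown here.
 */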
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	char buf[64];
	const struct firmware *fw;
	struct device *dev = &adap->pdev->dev;

	snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR,
		 FW_VERSION_MINOR, FW_VERSION_MICRO);
	ret = request_firmware(&fw, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not upgrade firmware: unable to load %s\n",
			buf);
		return ret;
	}
	ret = t3_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);

	if (ret == 0)
		dev_info(dev, "successful upgrade to firmware %d.%d.%d\n",
			 FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);
	else
		dev_err(dev, "failed to upgrade to firmware %d.%d.%d\n",
			FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO);

	return ret;
}

static inline char t3rev2char(struct adapter *adapter)
{
	char rev = 0;

	switch(adapter->params.rev) {
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}
static int update_tpsram(struct adapter *adap)
{
	const struct firmware *tpsram;
	char buf[64];
	struct device *dev = &adap->pdev->dev;
	int ret;
	char rev;

	rev = t3rev2char(adap);
	if (!rev)
		return 0;

	snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
		 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);

	ret = request_firmware(&tpsram, buf, dev);
	if (ret < 0) {
		dev_err(dev, "could not load TP SRAM: unable to load %s\n",
			buf);
		return ret;
	}

	ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
	if (ret)
		goto release_tpsram;

	ret = t3_set_proto_sram(adap, tpsram->data);
	if (ret == 0)
		dev_info(dev,
			 "successful update of protocol engine "
			 "to %d.%d.%d\n",
			 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	else
		dev_err(dev, "failed to update protocol engine %d.%d.%d\n",
			TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
	if (ret)
		dev_err(dev, "loading protocol SRAM failed\n");

release_tpsram:
	release_firmware(tpsram);

	return ret;
}
/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err = 0;
	int must_load;

	if (!(adap->flags & FULL_INIT_DONE)) {
		err = t3_check_fw_version(adap, &must_load);
		if (err == -EINVAL) {
			err = upgrade_fw(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_check_tpsram_version(adap, &must_load);
		if (err == -EINVAL) {
			err = update_tpsram(adap);
			if (err && must_load)
				goto out;
		}

		err = t3_init_hw(adap, 0);
		if (err)
			goto out;

		t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT);
		t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

		err = setup_sge_qsets(adap);
		if (err)
			goto out;

		setup_rss(adap);
		init_napi(adap);
		adap->flags |= FULL_INIT_DONE;
	}

	t3_intr_clear(adap);

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec,
				  t3_async_intr_handler, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_data_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else if ((err = request_irq(adap->pdev->irq,
				      t3_intr_handler(adap,
						      adap->sge.qs[0].rspq.
						      polling),
				      (adap->flags & USING_MSI) ?
				       0 : IRQF_SHARED,
				      adap->name, adap)))
		goto irq_err;

	enable_all_napi(adap);
	t3_sge_start(adap);
	t3_intr_enable(adap);

	if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) &&
	    is_offload(adap) && init_tp_parity(adap) == 0)
		adap->flags |= TP_PARITY_INIT;

	if (adap->flags & TP_PARITY_INIT) {
		t3_write_reg(adap, A_TP_INT_CAUSE,
			     F_CMCACHEPERR | F_ARPLUTPERR);
		t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff);
	}

	if ((adap->flags & (USING_MSIX | QUEUES_BOUND)) == USING_MSIX)
		bind_qsets(adap);
	adap->flags |= QUEUES_BOUND;

out:
	return err;
irq_err:
	CH_ERR(adap, "request_irq failed, err %d\n", err);
	goto out;
}
/*
 * Release resources when all the ports and offloading have been stopped.
 */
static void cxgb_down(struct adapter *adapter)
{
	t3_sge_stop(adapter);
	spin_lock_irq(&adapter->work_lock);	/* sync with PHY intr task */
	t3_intr_disable(adapter);
	spin_unlock_irq(&adapter->work_lock);

	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
		    n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);

	flush_workqueue(cxgb3_wq);	/* wait for external IRQ handler */
	quiesce_rx(adapter);
}

static void schedule_chk_task(struct adapter *adap)
{
	unsigned int timeo;

	timeo = adap->params.linkpoll_period ?
	    (HZ * adap->params.linkpoll_period) / 10 :
	    adap->params.stats_update_period * HZ;
	if (timeo)
		queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo);
}
static int offload_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct t3cdev *tdev = dev2t3cdev(dev);
	int adap_up = adapter->open_device_map & PORT_MASK;
	int err;

	if (test_and_set_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	if (!adap_up && (err = cxgb_up(adapter)) < 0)
		goto out;

	t3_tp_set_offload_mode(adapter, 1);
	tdev->lldev = adapter->port[0];
	err = cxgb3_offload_activate(adapter);
	if (err)
		goto out;

	init_port_mtus(adapter);
	t3_load_mtus(adapter, adapter->params.mtus, adapter->params.a_wnd,
		     adapter->params.b_wnd,
		     adapter->params.rev == 0 ?
		     adapter->port[0]->mtu : 0xffff);
	init_smt(adapter);

	/* Never mind if the next step fails */
	if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
		dev_dbg(&dev->dev, "cannot create sysfs group\n");

	/* Call back all registered clients */
	cxgb3_add_clients(tdev);

out:
	/* restore them in case the offload module has changed them */
	if (err) {
		t3_tp_set_offload_mode(adapter, 0);
		clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
		cxgb3_set_dummy_ops(tdev);
	}
	return err;
}

static int offload_close(struct t3cdev *tdev)
{
	struct adapter *adapter = tdev2adap(tdev);

	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		return 0;

	/* Call back all registered clients */
	cxgb3_remove_clients(tdev);

	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);

	tdev->lldev = NULL;
	cxgb3_set_dummy_ops(tdev);
	t3_tp_set_offload_mode(adapter, 0);
	clear_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	cxgb3_offload_deactivate(adapter);
	return 0;
}
static int cxgb_open(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int other_ports = adapter->open_device_map & PORT_MASK;
	int err;

	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		quiesce_rx(adapter);
		return err;
	}

	set_bit(pi->port_id, &adapter->open_device_map);
	if (is_offload(adapter) && !ofld_disable) {
		err = offload_open(dev);
		if (err)
			printk(KERN_WARNING
			       "Could not initialize offload capabilities\n");
	}

	link_start(dev);
	t3_port_intr_enable(adapter, pi->port_id);
	netif_start_queue(dev);
	if (!other_ports)
		schedule_chk_task(adapter);

	return 0;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t3_port_intr_disable(adapter, pi->port_id);
	netif_stop_queue(dev);
	pi->phy.ops->power_down(&pi->phy, 1);
	netif_carrier_off(dev);
	t3_mac_disable(&pi->mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);

	spin_lock(&adapter->work_lock);	/* sync with update task */
	clear_bit(pi->port_id, &adapter->open_device_map);
	spin_unlock(&adapter->work_lock);

	if (!(adapter->open_device_map & PORT_MASK))
		cancel_rearming_delayed_workqueue(cxgb3_wq,
						  &adapter->adap_check_task);

	if (!adapter->open_device_map)
		cxgb_down(adapter);

	return 0;
}
static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &pi->netstats;
	const struct mac_stats *pstats;

	spin_lock(&adapter->stats_lock);
	pstats = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes = pstats->tx_octets;
	ns->tx_packets = pstats->tx_frames;
	ns->rx_bytes = pstats->rx_octets;
	ns->rx_packets = pstats->rx_frames;
	ns->multicast = pstats->rx_mcast_frames;

	ns->tx_errors = pstats->tx_underrun;
	ns->rx_errors = pstats->rx_symbol_errs + pstats->rx_fcs_errs +
	    pstats->rx_too_long + pstats->rx_jabber + pstats->rx_short +
	    pstats->rx_fifo_ovfl;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->rx_jabber + pstats->rx_too_long;
	ns->rx_over_errors = 0;
	ns->rx_crc_errors = pstats->rx_fcs_errs;
	ns->rx_frame_errors = pstats->rx_symbol_errs;
	ns->rx_fifo_errors = pstats->rx_fifo_ovfl;
	ns->rx_missed_errors = pstats->rx_cong_drops;

	/* detailed tx_errors */
	ns->tx_aborted_errors = 0;
	ns->tx_carrier_errors = 0;
	ns->tx_fifo_errors = pstats->tx_underrun;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors = 0;
	return ns;
}
static u32 get_msglevel(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	return adapter->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxMulticastFramesOK",
	"TxBroadcastFramesOK",
	"TxPauseFrames      ",
	"TxUnderrun         ",
	"TxExtUnderrun      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxMulticastFramesOK",
	"RxBroadcastFramesOK",
	"RxPauseFrames      ",
	"RxFCSErrors        ",
	"RxSymbolErrors     ",
	"RxShortErrors      ",
	"RxJabberErrors     ",
	"RxLengthErrors     ",
	"RxFIFOoverflow     ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"PhyFIFOErrors      ",
	"TSO                ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"RxDrops            ",

	"CheckTXEnToggled   ",
	"CheckResets        ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}
#define T3_REGMAP_SIZE (3 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T3_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 fw_vers = 0;
	u32 tp_vers = 0;

	t3_get_fw_version(adapter, &fw_vers);
	t3_get_tp_version(adapter, &tp_vers);

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(adapter->pdev));
	if (!fw_vers)
		strcpy(info->fw_version, "N/A");
	else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%s %u.%u.%u TP %u.%u.%u",
			 G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
			 G_FW_VERSION_MAJOR(fw_vers),
			 G_FW_VERSION_MINOR(fw_vers),
			 G_FW_VERSION_MICRO(fw_vers),
			 G_TP_VERSION_MAJOR(tp_vers),
			 G_TP_VERSION_MINOR(tp_vers),
			 G_TP_VERSION_MICRO(tp_vers));
	}
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

static unsigned long collect_sge_port_stats(struct adapter *adapter,
					    struct port_info *p, int idx)
{
	int i;
	unsigned long tot = 0;

	for (i = 0; i < p->nqsets; ++i)
		tot += adapter->sge.qs[i + p->first_qset].port_stats[idx];
	return tot;
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct mac_stats *s;

	spin_lock(&adapter->stats_lock);
	s = t3_mac_update_stats(&pi->mac);
	spin_unlock(&adapter->stats_lock);

	*data++ = s->tx_octets;
	*data++ = s->tx_frames;
	*data++ = s->tx_mcast_frames;
	*data++ = s->tx_bcast_frames;
	*data++ = s->tx_pause;
	*data++ = s->tx_underrun;
	*data++ = s->tx_fifo_urun;

	*data++ = s->tx_frames_64;
	*data++ = s->tx_frames_65_127;
	*data++ = s->tx_frames_128_255;
	*data++ = s->tx_frames_256_511;
	*data++ = s->tx_frames_512_1023;
	*data++ = s->tx_frames_1024_1518;
	*data++ = s->tx_frames_1519_max;

	*data++ = s->rx_octets;
	*data++ = s->rx_frames;
	*data++ = s->rx_mcast_frames;
	*data++ = s->rx_bcast_frames;
	*data++ = s->rx_pause;
	*data++ = s->rx_fcs_errs;
	*data++ = s->rx_symbol_errs;
	*data++ = s->rx_short;
	*data++ = s->rx_jabber;
	*data++ = s->rx_too_long;
	*data++ = s->rx_fifo_ovfl;

	*data++ = s->rx_frames_64;
	*data++ = s->rx_frames_65_127;
	*data++ = s->rx_frames_128_255;
	*data++ = s->rx_frames_256_511;
	*data++ = s->rx_frames_512_1023;
	*data++ = s->rx_frames_1024_1518;
	*data++ = s->rx_frames_1519_max;

	*data++ = pi->phy.fifo_errors;

	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TSO);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANEX);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_VLANINS);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_TX_CSUM);
	*data++ = collect_sge_port_stats(adapter, pi, SGE_PSTAT_RX_CSUM_GOOD);
	*data++ = s->rx_cong_drops;

	*data++ = s->num_toggled;
	*data++ = s->num_resets;
}
static inline void reg_block_dump(struct adapter *ap, void *buf,
				  unsigned int start, unsigned int end)
{
	u32 *p = buf + start;

	for (; start <= end; start += sizeof(u32))
		*p++ = t3_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *ap = pi->adapter;

	/*
	 * Version scheme:
	 * bits 0..9: chip version
	 * bits 10..15: chip revision
	 * bit 31: set for PCIe cards
	 */
	regs->version = 3 | (ap->params.rev << 10) | (is_pcie(ap) << 31);

	/*
	 * We skip the MAC statistics registers because they are clear-on-read.
	 * Also reading multi-register stats would need to synchronize with the
	 * periodic mac stats accumulation.  Hard to justify the complexity.
	 */
	memset(buf, 0, T3_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RSPQ_CREDIT_RETURN);
	reg_block_dump(ap, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT);
	reg_block_dump(ap, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE);
	reg_block_dump(ap, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA);
	reg_block_dump(ap, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3);
	reg_block_dump(ap, buf, A_XGM_SERDES_STATUS0,
		       XGM_REG(A_XGM_SERDES_STAT3, 1));
	reg_block_dump(ap, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1),
		       XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1));
}
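/*
 * Editor's worked example (not part of the original source): for a rev-2
 * (T3B) PCIe adapter the version word computed above is
 *
 *	3 | (2 << 10) | (1 << 31) = 0x80000803
 *
 * chip version 3 in bits 0..9, revision 2 in bits 10..15, PCIe flag in
 * bit 31.
 */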
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_config.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	p->phy.ops->autoneg_restart(&p->phy);
	return 0;
}

static int cxgb3_phys_id(struct net_device *dev, u32 data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i;

	if (data == 0)
		data = 2;

	for (i = 0; i < data * 2; i++) {
		t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
				 (i & 1) ? F_GPIO0_OUT_VAL : 0);
		if (msleep_interruptible(500))
			break;
	}
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
	return 0;
}
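/*
 * Editor's note (usage sketch, not part of the original source): this
 * handler backs the ethtool port-identify command, e.g.
 *
 *	ethtool -p eth0 5
 *
 * blinks the GPIO0-driven LED for roughly 5 seconds (data * 2 iterations of
 * 500 ms each), then drives it back high.
 */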
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);

	cmd->supported = p->link_config.supported;
	cmd->advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = p->link_config.speed;
		cmd->duplex = p->link_config.duplex;
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->phy_address = p->phy.addr;
	cmd->transceiver = XCVR_EXTERNAL;
	cmd->autoneg = p->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
static int speed_duplex_to_caps(int speed, int duplex)
{
	int cap = 0;

	switch (speed) {
	case SPEED_10:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10baseT_Full;
		else
			cap = SUPPORTED_10baseT_Half;
		break;
	case SPEED_100:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_100baseT_Full;
		else
			cap = SUPPORTED_100baseT_Half;
		break;
	case SPEED_1000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_1000baseT_Full;
		else
			cap = SUPPORTED_1000baseT_Half;
		break;
	case SPEED_10000:
		if (duplex == DUPLEX_FULL)
			cap = SUPPORTED_10000baseT_Full;
	}
	return cap;
}
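/*
 * Editor's worked example (not part of the original source):
 * speed_duplex_to_caps(SPEED_100, DUPLEX_HALF) returns
 * SUPPORTED_100baseT_Half, while 10 Gbps has no half-duplex case, so
 * speed_duplex_to_caps(SPEED_10000, DUPLEX_HALF) returns 0.
 */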
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
		      ADVERTISED_10000baseT_Full)
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->autoneg == AUTONEG_DISABLE) {
		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);

		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
			return -EINVAL;
		lc->requested_speed = cmd->speed;
		lc->requested_duplex = cmd->duplex;
		lc->advertising = 0;
	} else {
		cmd->advertising &= ADVERTISED_MASK;
		cmd->advertising &= lc->supported;
		if (!cmd->advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->autoneg;
	if (netif_running(dev))
		t3_link_start(&p->phy, &p->mac, lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_config;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & SUPPORTED_Autoneg)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (lc->autoneg == AUTONEG_ENABLE) {
		if (netif_running(dev))
			t3_link_start(&p->phy, &p->mac, lc);
	} else {
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
		if (netif_running(dev))
			t3_mac_set_speed_duplex_fc(&p->mac, -1, -1, lc->fc);
	}
	return 0;
}
static u32 get_rx_csum(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	return p->rx_csum_offload;
}

static int set_rx_csum(struct net_device *dev, u32 data)
{
	struct port_info *p = netdev_priv(dev);

	p->rx_csum_offload = data;
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset];

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = 0;
	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = q->fl_size;
	e->rx_mini_pending = q->rspq_size;
	e->rx_jumbo_pending = q->jumbo_size;
	e->tx_pending = q->txq_size[0];
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q;
	int i;

	if (e->rx_pending > MAX_RX_BUFFERS ||
	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES ||
	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
	    e->tx_pending < adapter->params.nports * MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	q = &adapter->params.sge.qset[pi->first_qset];
	for (i = 0; i < pi->nqsets; ++i, ++q) {
		q->rspq_size = e->rx_mini_pending;
		q->fl_size = e->rx_pending;
		q->jumbo_size = e->rx_jumbo_pending;
		q->txq_size[0] = e->tx_pending;
		q->txq_size[1] = e->tx_pending;
		q->txq_size[2] = e->tx_pending;
	}
	return 0;
}
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *qsp = &adapter->params.sge.qset[0];
	struct sge_qset *qs = &adapter->sge.qs[0];

	if (c->rx_coalesce_usecs * 10 > M_NEWTIMER)
		return -EINVAL;

	qsp->coalesce_usecs = c->rx_coalesce_usecs;
	t3_update_qset_coalesce(qs, qsp);
	return 0;
}
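/*
 * Editor's note (usage sketch, not part of the original source):
 *
 *	ethtool -C eth0 rx-usecs 50
 *
 * reaches set_coalesce() above; the value is rejected if 10 * rx-usecs
 * exceeds M_NEWTIMER, and otherwise applied to queue set 0.
 */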
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct qset_params *q = adapter->params.sge.qset;

	c->rx_coalesce_usecs = q->coalesce_usecs;
	return 0;
}
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int i, err = 0;

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = t3_seeprom_read(adapter, i, (__le32 *) & buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 * data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 aligned_offset, aligned_len;
	__le32 *p;
	u8 *buf;
	int err;

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = t3_seeprom_read(adapter, aligned_offset, (__le32 *) buf);
		if (!err && aligned_len > 4)
			err = t3_seeprom_read(adapter,
					      aligned_offset + aligned_len - 4,
					      (__le32 *) & buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t3_seeprom_wp(adapter, 0);
	if (err)
		goto out;

	for (p = (__le32 *) buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = t3_seeprom_write(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t3_seeprom_wp(adapter, 1);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
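/*
 * Editor's worked example of the alignment arithmetic above (not part of
 * the original source): a write with offset = 5 and len = 6 gives
 *
 *	aligned_offset = 5 & ~3 = 4
 *	aligned_len    = (6 + (5 & 3) + 3) & ~3 = 10 & ~3 = 8
 *
 * so the code reads the two surrounding 32-bit words into a bounce buffer,
 * merges the 6 user bytes at offset 1 within it, and writes back two
 * aligned words.
 */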
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings = get_settings,
	.set_settings = set_settings,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.set_eeprom = set_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_rx_csum = get_rx_csum,
	.set_rx_csum = set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.phys_id = cxgb3_phys_id,
	.nway_reset = restart_autoneg,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_wol = get_wol,
	.set_tso = ethtool_op_set_tso,
};
static int in_range(int val, int lo, int hi)
{
	return val < 0 || (val <= hi && val >= lo);
}
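/*
 * Editor's note (not part of the original source): in_range() deliberately
 * treats negative values as "in range" because the ioctl structures below
 * use -1 to mean "leave this parameter unchanged".  For example:
 *
 *	in_range(-1, 32, 1024)  -> 1	(sentinel, always accepted)
 *	in_range(100, 32, 1024) -> 1
 *	in_range(8, 32, 1024)   -> 0
 */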
static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
{
	u32 cmd;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	switch (cmd) {
	case CHELSIO_SET_QSET_PARAMS:{
		int i;
		struct qset_params *q;
		struct ch_qset_params t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;
		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
		    !in_range(t.cong_thres, 0, 255) ||
		    !in_range(t.txq_size[0], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[1], MIN_TXQ_ENTRIES,
			      MAX_TXQ_ENTRIES) ||
		    !in_range(t.txq_size[2], MIN_CTRL_TXQ_ENTRIES,
			      MAX_CTRL_TXQ_ENTRIES) ||
		    !in_range(t.fl_size[0], MIN_FL_ENTRIES,
			      MAX_RX_BUFFERS)
		    || !in_range(t.fl_size[1], MIN_FL_ENTRIES,
				 MAX_RX_JUMBO_BUFFERS)
		    || !in_range(t.rspq_size, MIN_RSPQ_ENTRIES,
				 MAX_RSPQ_ENTRIES))
			return -EINVAL;
		if ((adapter->flags & FULL_INIT_DONE) &&
		    (t.rspq_size >= 0 || t.fl_size[0] >= 0 ||
		     t.fl_size[1] >= 0 || t.txq_size[0] >= 0 ||
		     t.txq_size[1] >= 0 || t.txq_size[2] >= 0 ||
		     t.polling >= 0 || t.cong_thres >= 0))
			return -EBUSY;

		q = &adapter->params.sge.qset[t.qset_idx];

		if (t.rspq_size >= 0)
			q->rspq_size = t.rspq_size;
		if (t.fl_size[0] >= 0)
			q->fl_size = t.fl_size[0];
		if (t.fl_size[1] >= 0)
			q->jumbo_size = t.fl_size[1];
		if (t.txq_size[0] >= 0)
			q->txq_size[0] = t.txq_size[0];
		if (t.txq_size[1] >= 0)
			q->txq_size[1] = t.txq_size[1];
		if (t.txq_size[2] >= 0)
			q->txq_size[2] = t.txq_size[2];
		if (t.cong_thres >= 0)
			q->cong_thres = t.cong_thres;
		if (t.intr_lat >= 0) {
			struct sge_qset *qs =
			    &adapter->sge.qs[t.qset_idx];

			q->coalesce_usecs = t.intr_lat;
			t3_update_qset_coalesce(qs, q);
		}
		if (t.polling >= 0) {
			if (adapter->flags & USING_MSIX)
				q->polling = t.polling;
			else {
				/* No polling with INTx for T3A */
				if (adapter->params.rev == 0 &&
				    !(adapter->flags & USING_MSI))
					t.polling = 0;

				for (i = 0; i < SGE_QSETS; i++) {
					q = &adapter->params.sge.
					    qset[i];
					q->polling = t.polling;
				}
			}
		}
		break;
	}
	case CHELSIO_GET_QSET_PARAMS:{
		struct qset_params *q;
		struct ch_qset_params t;

		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if (t.qset_idx >= SGE_QSETS)
			return -EINVAL;

		q = &adapter->params.sge.qset[t.qset_idx];
		t.rspq_size = q->rspq_size;
		t.txq_size[0] = q->txq_size[0];
		t.txq_size[1] = q->txq_size[1];
		t.txq_size[2] = q->txq_size[2];
		t.fl_size[0] = q->fl_size;
		t.fl_size[1] = q->jumbo_size;
		t.polling = q->polling;
		t.intr_lat = q->coalesce_usecs;
		t.cong_thres = q->cong_thres;

		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;
		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;
		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
			    min_t(unsigned int, t.len, sizeof(buf));

			ret =
			    t3_mc7_bd_read(mem, t.addr / 8, chunk / 8,
					   buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match,
					       t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match,
					       t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret, mmd;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = pi->phy.addr;
		/* FALLTHRU */
	case SIOCGMIIREG:{
		u32 val;
		struct cphy *phy = &pi->phy;

		if (!phy->mdio_read)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
			    phy->mdio_read(adapter, data->phy_id & 0x1f,
					   mmd, data->reg_num, &val);
		} else
			ret =
			    phy->mdio_read(adapter, data->phy_id & 0x1f,
					   0, data->reg_num & 0x1f,
					   &val);
		if (!ret)
			data->val_out = val;
		break;
	}
	case SIOCSMIIREG:{
		struct cphy *phy = &pi->phy;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!phy->mdio_write)
			return -EOPNOTSUPP;
		if (is_10G(adapter)) {
			mmd = data->phy_id >> 8;
			if (!mmd)
				mmd = MDIO_DEV_PCS;
			else if (mmd > MDIO_DEV_XGXS)
				return -EINVAL;

			ret =
			    phy->mdio_write(adapter,
					    data->phy_id & 0x1f, mmd,
					    data->reg_num,
					    data->val_in);
		} else
			ret =
			    phy->mdio_write(adapter,
					    data->phy_id & 0x1f, 0,
					    data->reg_num & 0x1f,
					    data->val_in);
		break;
	}
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, 0, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 * t3_synchronize_rx - wait for current Rx processing on a port to complete
 * @adap: the adapter
 * @p: the port
 *
 * Ensures that current Rx processing on any of the queues associated with
 * the given port completes before returning.  We do this by acquiring and
 * releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = 0; i < p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i + p->first_qset].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;
		for_each_port(adapter, i)
		    have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}

static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (!(p->port_type->caps & SUPPORTED_IRQ) && netif_running(dev))
			t3_link_changed(adapter, i);
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status;

		if (!netif_running(dev))
			continue;

		status = 0;
		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, 0, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;

	adapter->check_task_cnt++;

	/* Check link status for PHYs without interrupts */
	if (p->linkpoll_period)
		check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/* Schedule the next check update if any port is active. */
	spin_lock(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock(&adapter->work_lock);
}
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);

	t3_phy_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
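/*
 * Called on a fatal hardware error: quiesces the SGE and MACs, masks
 * interrupts, and logs the firmware status if it can still be read.
 */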
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);
		t3_intr_disable(adapter);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int i;

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
		offload_close(&adapter->tdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	pci_disable_device(pdev);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	t3_prep_adapter(adapter, adapter->params.info, 1);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
			netif_device_attach(netdev);
		}
	}

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		if (offload_open(adapter->port[0]))
			printk(KERN_WARNING
			       "Could not bring back offload capabilities\n");
	}
}
static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
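/*
 * Request one MSI-X vector per queue set plus one for the slow path.
 * Returns 0 on success; on partial failure pci_enable_msix() reports how
 * many vectors were actually available.
 */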
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int i, err;

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	err = pci_enable_msix(adap->pdev, entries, ARRAY_SIZE(entries));
	if (!err) {
		for (i = 0; i < ARRAY_SIZE(entries); ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(&adap->pdev->dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}
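/*
 * Print adapter, port type, and bus information for each successfully
 * registered port, plus the memory sizes and serial number once.
 */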
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->port_type->desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
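/*
 * PCI probe routine: maps the device, allocates the adapter and its
 * net_devices, registers them, and selects the interrupt scheme
 * (MSI-X, MSI, or legacy pin interrupts).
 */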
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev(sizeof(struct port_info));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_csum_offload = 1;
		pi->nqsets = 1;
		pi->first_qset = i;
		pi->activity = 0;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_LLTX;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->vlan_rx_register = vlan_rx_register;

		netdev->open = cxgb_open;
		netdev->stop = cxgb_close;
		netdev->hard_start_xmit = t3_eth_xmit;
		netdev->get_stats = cxgb_get_stats;
		netdev->set_multicast_list = cxgb_set_rxmode;
		netdev->do_ioctl = cxgb_ioctl;
		netdev->change_mtu = cxgb_change_mtu;
		netdev->set_mac_address = cxgb_set_mac_addr;
#ifdef CONFIG_NET_POLL_CONTROLLER
		netdev->poll_controller = cxgb_netpoll;
#endif

		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
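/*
 * PCI remove routine: tears down the offload side, unregisters the net
 * devices, and releases all adapter resources.
 */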
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};
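/*
 * Module entry points.  The shared workqueue is created on first probe
 * and destroyed on module unload.
 */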
static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);