/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/mdio.h>
#include <linux/sockios.h>
#include <linux/workqueue.h>
#include <linux/proc_fs.h>
#include <linux/rtnetlink.h>
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
#include <linux/sched.h>
#include <asm/uaccess.h>

#include "common.h"
#include "cxgb3_ioctl.h"
#include "regs.h"
#include "cxgb3_offload.h"
#include "version.h"

#include "cxgb3_ctl_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
enum {
	MAX_TXQ_ENTRIES = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES = 16384,
	MAX_RX_BUFFERS = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES = 32,
	MIN_FL_ENTRIES = 32
};

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define EEPROM_MAGIC 0x38E2F10C

#define CH_DEVICE(devid, idx) \
	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
	CH_DEVICE(0x20, 0),	/* PE9000 */
	CH_DEVICE(0x21, 1),	/* T302E */
	CH_DEVICE(0x22, 2),	/* T310E */
	CH_DEVICE(0x23, 3),	/* T320X */
	CH_DEVICE(0x24, 1),	/* T302X */
	CH_DEVICE(0x25, 3),	/* T320E */
	CH_DEVICE(0x26, 2),	/* T310X */
	CH_DEVICE(0x30, 2),	/* T3B10 */
	CH_DEVICE(0x31, 3),	/* T3B20 */
	CH_DEVICE(0x32, 1),	/* T3B02 */
	CH_DEVICE(0x35, 6),	/* T3C20-derived T3C10 */
	CH_DEVICE(0x36, 3),	/* S320E-CR */
	CH_DEVICE(0x37, 7),	/* N320E-G2 */
	{0,}
};
MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb3_pci_tbl);

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T3 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI or MSI-X");
/*
 * The driver enables offload as a default.
 * To disable it, use ofld_disable = 1.
 */
static int ofld_disable = 0;

module_param(ofld_disable, int, 0644);
MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
/*
 * We have work elements that we need to cancel when an interface is taken
 * down.  Normally the work elements would be executed by keventd but that
 * can deadlock because of linkwatch.  If our close method takes the rtnl
 * lock and linkwatch is ahead of our work elements in keventd, linkwatch
 * will block keventd as it needs the rtnl lock, and we'll deadlock waiting
 * for our work to complete.  Get our own work queue to solve this.
 */
struct workqueue_struct *cxgb3_wq;
/**
 *	link_report - show link status and link speed/duplex
 *	@dev: the port whose settings are to be reported
 *
 *	Shows the link status, speed, and duplex of a port.
 */
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		printk(KERN_INFO "%s: link down\n", dev->name);
	else {
		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_config.speed) {
		case SPEED_10000:
			s = "10Gbps";
			break;
		case SPEED_1000:
			s = "1000Mbps";
			break;
		case SPEED_100:
			s = "100Mbps";
			break;
		}

		printk(KERN_INFO "%s: link up, %s, %s-duplex\n", dev->name, s,
		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
	}
}
static void enable_tx_fifo_drain(struct adapter *adapter,
				 struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset, 0,
			 F_ENDROPPKT);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, 0);
	t3_write_reg(adapter, A_XGM_TX_CTRL + pi->mac.offset, F_TXEN);
	t3_write_reg(adapter, A_XGM_RX_CTRL + pi->mac.offset, F_RXEN);
}

static void disable_tx_fifo_drain(struct adapter *adapter,
				  struct port_info *pi)
{
	t3_set_reg_field(adapter, A_XGM_TXFIFO_CFG + pi->mac.offset,
			 F_ENDROPPKT, 0);
}
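
/*
 * t3_os_link_fault - handle a link fault event for a port
 *
 * When the fault clears, re-enable the MAC Tx path and clear the local
 * fault status; while a fault is present, drop the carrier and drain the
 * Tx FIFO.
 */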
void t3_os_link_fault(struct adapter *adap, int port_id, int state)
{
	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (state == netif_carrier_ok(dev))
		return;

	if (state) {
		struct cmac *mac = &pi->mac;

		netif_carrier_on(dev);

		disable_tx_fifo_drain(adap, pi);

		/* Clear local faults */
		t3_xgm_intr_disable(adap, pi->port_id);
		t3_read_reg(adap, A_XGM_INT_STATUS + pi->mac.offset);
		t3_write_reg(adap, A_XGM_INT_CAUSE + pi->mac.offset,
			     F_XGM_INT);
		t3_set_reg_field(adap, A_XGM_INT_ENABLE + pi->mac.offset,
				 F_XGM_INT, F_XGM_INT);
		t3_xgm_intr_enable(adap, pi->port_id);

		t3_mac_enable(mac, MAC_DIRECTION_TX);
	} else {
		netif_carrier_off(dev);

		/* Flush TX FIFO */
		enable_tx_fifo_drain(adap, pi);
	}
	link_report(dev);
}
/**
 *	t3_os_link_changed - handle link status changes
 *	@adapter: the adapter associated with the link change
 *	@port_id: the port index whose link status has changed
 *	@link_stat: the new status of the link
 *	@speed: the new speed setting
 *	@duplex: the new duplex setting
 *	@pause: the new flow-control setting
 *
 *	This is the OS-dependent handler for link status changes.  The OS
 *	neutral handler takes care of most of the processing for these events,
 *	then calls this handler for any OS-specific processing.
 */
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
			int speed, int duplex, int pause)
{
	struct net_device *dev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	/* Skip changes from disabled ports. */
	if (!netif_running(dev))
		return;

	if (link_stat != netif_carrier_ok(dev)) {
		if (link_stat) {
			disable_tx_fifo_drain(adapter, pi);

			t3_mac_enable(mac, MAC_DIRECTION_RX);

			/* Clear local faults */
			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS +
				    pi->mac.offset);
			t3_write_reg(adapter,
				     A_XGM_INT_CAUSE + pi->mac.offset,
				     F_XGM_INT);

			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, F_XGM_INT);
			t3_xgm_intr_enable(adapter, pi->port_id);

			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);

			t3_xgm_intr_disable(adapter, pi->port_id);
			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_set_reg_field(adapter,
					 A_XGM_INT_ENABLE + pi->mac.offset,
					 F_XGM_INT, 0);

			if (is_10G(adapter))
				pi->phy.ops->power_down(&pi->phy, 1);

			t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
			t3_mac_disable(mac, MAC_DIRECTION_RX);
			t3_link_start(&pi->phy, mac, &pi->link_config);

			/* Flush TX FIFO */
			enable_tx_fifo_drain(adapter, pi);
		}

		link_report(dev);
	}
}
/**
 *	t3_os_phymod_changed - handle PHY module changes
 *	@adap: the adapter associated with the module change
 *	@port_id: the port index whose PHY module changed
 *
 *	This is the OS-dependent handler for PHY module changes.  It is
 *	invoked when a PHY module is removed or inserted for any OS-specific
 *	processing.
 */
void t3_os_phymod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX", "unknown"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->phy.modtype == phy_modtype_none)
		printk(KERN_INFO "%s: PHY module unplugged\n", dev->name);
	else
		printk(KERN_INFO "%s: %s PHY module inserted\n", dev->name,
		       mod_str[pi->phy.modtype]);
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	t3_mac_set_rx_mode(&pi->mac, dev);
}

/**
 *	link_start - enable a port
 *	@dev: the device to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static void link_start(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct cmac *mac = &pi->mac;

	t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
	t3_mac_set_mtu(mac, dev->mtu);
	t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
	t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
	t3_mac_set_rx_mode(mac, dev);
	t3_link_start(&pi->phy, mac, &pi->link_config);
	t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
static inline void cxgb_disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for asynchronous events used with MSI-X.
 */
static irqreturn_t t3_async_intr_handler(int irq, void *cookie)
{
	t3_slow_intr_handler(cookie);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1;

	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
	adap->msix_info[0].desc[n] = 0;

	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
			snprintf(adap->msix_info[msi_idx].desc, n,
				 "%s-%d", d->name, pi->first_qset + i);
			adap->msix_info[msi_idx].desc[n] = 0;
		}
	}
}
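
/*
 * Request one MSI-X data interrupt per SGE queue set, using the vector
 * names prepared by name_msix_vecs().  On failure, release the vectors
 * already acquired before returning the error.
 */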
static int request_msix_data_irqs(struct adapter *adap)
{
	int i, j, err, qidx = 0;

	for_each_port(adap, i) {
		int nqsets = adap2pinfo(adap, i)->nqsets;

		for (j = 0; j < nqsets; ++j) {
			err = request_irq(adap->msix_info[qidx + 1].vec,
					  t3_intr_handler(adap,
							  adap->sge.qs[qidx].
							  rspq.polling), 0,
					  adap->msix_info[qidx + 1].desc,
					  &adap->sge.qs[qidx]);
			if (err) {
				while (--qidx >= 0)
					free_irq(adap->msix_info[qidx + 1].vec,
						 &adap->sge.qs[qidx]);
				return err;
			}
			qidx++;
		}
	}
	return 0;
}
static void free_irq_resources(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		int i, n = 0;

		free_irq(adapter->msix_info[0].vec, adapter);
		for_each_port(adapter, i)
			n += adap2pinfo(adapter, i)->nqsets;

		for (i = 0; i < n; ++i)
			free_irq(adapter->msix_info[i + 1].vec,
				 &adapter->sge.qs[i]);
	} else
		free_irq(adapter->pdev->irq, adapter);
}
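
/*
 * Wait, with a bounded number of retries, for the first response queue to
 * accumulate @n offload replies beyond the count captured in @init_cnt.
 */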
static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;

	while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);
	}
	return 0;
}
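
/*
 * Prime the TP's parity-protected memories (SMT, L2T, routing table and a
 * TCB) with benign management writes so that later accesses do not report
 * stale parity errors.
 */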
static int init_tp_parity(struct adapter *adap)
{
	int i;
	struct sk_buff *skb;
	struct cpl_set_tcb_field *greq;
	unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;

	t3_tp_set_offload_mode(adap, 1);

	for (i = 0; i < 16; i++) {
		struct cpl_smt_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_smt_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i));
		req->mtu_idx = NMTUS - 1;
		req->iff = i;
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_l2t_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i));
		req->params = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	for (i = 0; i < 2048; i++) {
		struct cpl_rte_write_req *req;

		skb = alloc_skb(sizeof(*req), GFP_KERNEL);
		if (!skb)
			skb = adap->nofail_skb;
		if (!skb)
			goto alloc_skb_fail;

		req = (struct cpl_rte_write_req *)__skb_put(skb, sizeof(*req));
		memset(req, 0, sizeof(*req));
		req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
		OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i));
		req->l2t_idx = htonl(V_L2T_W_IDX(i));
		t3_mgmt_tx(adap, skb);
		if (skb == adap->nofail_skb) {
			await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1);
			adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
			if (!adap->nofail_skb)
				goto alloc_skb_fail;
		}
	}

	skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	if (!skb)
		skb = adap->nofail_skb;
	if (!skb)
		goto alloc_skb_fail;

	greq = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*greq));
	memset(greq, 0, sizeof(*greq));
	greq->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0));
	greq->mask = cpu_to_be64(1);
	t3_mgmt_tx(adap, skb);

	i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
	if (skb == adap->nofail_skb) {
		i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1);
		adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL);
	}

	t3_tp_set_offload_mode(adap, 0);
	return i;

alloc_skb_fail:
	t3_tp_set_offload_mode(adap, 0);
	return -ENOMEM;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS to distribute packets to multiple receive queues.  We
 *	configure the RSS CPU lookup table to distribute to the number of HW
 *	receive queues, and the response queue lookup table to narrow that
 *	down to the response queues actually configured for each port.
 *	We always configure the RSS mapping for two ports since the mapping
 *	table has plenty of entries.
 */
static void setup_rss(struct adapter *adap)
{
	int i;
	unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
	unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
	u8 cpus[SGE_QSETS + 1];
	u16 rspq_map[RSS_TABLE_SIZE];

	for (i = 0; i < SGE_QSETS; ++i)
		cpus[i] = i;
	cpus[SGE_QSETS] = 0xff;	/* terminator */

	for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) {
		rspq_map[i] = i % nq0;
		rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
	}

	t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
		      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
		      V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
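
/*
 * Ring the doorbell of every Tx queue of every SGE queue set so the
 * hardware re-examines their descriptor rings.
 */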
static void ring_dbs(struct adapter *adap)
{
	int i, j;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			for (j = 0; j < SGE_TXQ_PER_SET; j++)
				t3_write_reg(adap, A_SG_KDOORBELL,
					     F_SELEGRCNTX |
					     V_EGRCNTX(qs->txq[j].cntxt_id));
	}
}
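
/*
 * Register a NAPI context for each queue set that has a net_device
 * attached.  The NAPI_INIT flag records that this has been done.
 */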
static void init_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++) {
		struct sge_qset *qs = &adap->sge.qs[i];

		if (qs->adap)
			netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
				       64);
	}

	/*
	 * netif_napi_add() can be called only once per napi_struct because it
	 * adds each new napi_struct to a list.  Be careful not to call it a
	 * second time, e.g., during EEH recovery, by making a note of it.
	 */
	adap->flags |= NAPI_INIT;
}
/*
 * Wait until all NAPI handlers are descheduled.  This includes the handlers of
 * both netdevices representing interfaces and the dummy ones for the extra
 * queues.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_disable(&adap->sge.qs[i].napi);
}
static void enable_all_napi(struct adapter *adap)
{
	int i;

	for (i = 0; i < SGE_QSETS; i++)
		if (adap->sge.qs[i].adap)
			napi_enable(&adap->sge.qs[i].napi);
}
/**
 *	set_qset_lro - Turn a queue set's LRO capability on and off
 *	@dev: the device the qset is attached to
 *	@qset_idx: the queue set index
 *	@val: the LRO switch
 *
 *	Sets LRO on or off for a particular queue set.
 *	The device's features flag is updated to reflect the LRO
 *	capability when all queues belonging to the device are
 *	in the same state.
 */
static void set_qset_lro(struct net_device *dev, int qset_idx, int val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	adapter->params.sge.qset[qset_idx].lro = !!val;
	adapter->sge.qs[qset_idx].lro_enabled = !!val;
}
/**
 *	setup_sge_qsets - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_qsets(struct adapter *adap)
{
	int i, j, err, irq_idx = 0, qset_idx = 0;
	unsigned int ntxq = SGE_TXQ_PER_SET;

	if (adap->params.rev > 0 && !(adap->flags & USING_MSI))
		irq_idx = -1;

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);

		pi->qs = &adap->sge.qs[pi->first_qset];
		for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
			err = t3_sge_alloc_qset(adap, qset_idx, 1,
				(adap->flags & USING_MSIX) ? qset_idx + 1 :
							     irq_idx,
				&adap->params.sge.qset[qset_idx], ntxq, dev,
				netdev_get_tx_queue(dev, j));
			if (err) {
				t3_free_sge_resources(adap);
				return err;
			}
		}
	}

	return 0;
}
703 static ssize_t
attr_show(struct device
*d
, char *buf
,
704 ssize_t(*format
) (struct net_device
*, char *))
708 /* Synchronize with ioctls that may shut down the device */
710 len
= (*format
) (to_net_dev(d
), buf
);
715 static ssize_t
attr_store(struct device
*d
,
716 const char *buf
, size_t len
,
717 ssize_t(*set
) (struct net_device
*, unsigned int),
718 unsigned int min_val
, unsigned int max_val
)
724 if (!capable(CAP_NET_ADMIN
))
727 val
= simple_strtoul(buf
, &endp
, 0);
728 if (endp
== buf
|| val
< min_val
|| val
> max_val
)
732 ret
= (*set
) (to_net_dev(d
), val
);
739 #define CXGB3_SHOW(name, val_expr) \
740 static ssize_t format_##name(struct net_device *dev, char *buf) \
742 struct port_info *pi = netdev_priv(dev); \
743 struct adapter *adap = pi->adapter; \
744 return sprintf(buf, "%u\n", val_expr); \
746 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
749 return attr_show(d, buf, format_##name); \
752 static ssize_t
set_nfilters(struct net_device
*dev
, unsigned int val
)
754 struct port_info
*pi
= netdev_priv(dev
);
755 struct adapter
*adap
= pi
->adapter
;
756 int min_tids
= is_offload(adap
) ? MC5_MIN_TIDS
: 0;
758 if (adap
->flags
& FULL_INIT_DONE
)
760 if (val
&& adap
->params
.rev
== 0)
762 if (val
> t3_mc5_size(&adap
->mc5
) - adap
->params
.mc5
.nservers
-
765 adap
->params
.mc5
.nfilters
= val
;
769 static ssize_t
store_nfilters(struct device
*d
, struct device_attribute
*attr
,
770 const char *buf
, size_t len
)
772 return attr_store(d
, buf
, len
, set_nfilters
, 0, ~0);
775 static ssize_t
set_nservers(struct net_device
*dev
, unsigned int val
)
777 struct port_info
*pi
= netdev_priv(dev
);
778 struct adapter
*adap
= pi
->adapter
;
780 if (adap
->flags
& FULL_INIT_DONE
)
782 if (val
> t3_mc5_size(&adap
->mc5
) - adap
->params
.mc5
.nfilters
-
785 adap
->params
.mc5
.nservers
= val
;
789 static ssize_t
store_nservers(struct device
*d
, struct device_attribute
*attr
,
790 const char *buf
, size_t len
)
792 return attr_store(d
, buf
, len
, set_nservers
, 0, ~0);
795 #define CXGB3_ATTR_R(name, val_expr) \
796 CXGB3_SHOW(name, val_expr) \
797 static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
799 #define CXGB3_ATTR_RW(name, val_expr, store_method) \
800 CXGB3_SHOW(name, val_expr) \
801 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
803 CXGB3_ATTR_R(cam_size
, t3_mc5_size(&adap
->mc5
));
804 CXGB3_ATTR_RW(nfilters
, adap
->params
.mc5
.nfilters
, store_nfilters
);
805 CXGB3_ATTR_RW(nservers
, adap
->params
.mc5
.nservers
, store_nservers
);
807 static struct attribute
*cxgb3_attrs
[] = {
808 &dev_attr_cam_size
.attr
,
809 &dev_attr_nfilters
.attr
,
810 &dev_attr_nservers
.attr
,
814 static struct attribute_group cxgb3_attr_group
= {.attrs
= cxgb3_attrs
};
816 static ssize_t
tm_attr_show(struct device
*d
,
817 char *buf
, int sched
)
819 struct port_info
*pi
= netdev_priv(to_net_dev(d
));
820 struct adapter
*adap
= pi
->adapter
;
821 unsigned int v
, addr
, bpt
, cpt
;
824 addr
= A_TP_TX_MOD_Q1_Q0_RATE_LIMIT
- sched
/ 2;
826 t3_write_reg(adap
, A_TP_TM_PIO_ADDR
, addr
);
827 v
= t3_read_reg(adap
, A_TP_TM_PIO_DATA
);
830 bpt
= (v
>> 8) & 0xff;
833 len
= sprintf(buf
, "disabled\n");
835 v
= (adap
->params
.vpd
.cclk
* 1000) / cpt
;
836 len
= sprintf(buf
, "%u Kbps\n", (v
* bpt
) / 125);
842 static ssize_t
tm_attr_store(struct device
*d
,
843 const char *buf
, size_t len
, int sched
)
845 struct port_info
*pi
= netdev_priv(to_net_dev(d
));
846 struct adapter
*adap
= pi
->adapter
;
851 if (!capable(CAP_NET_ADMIN
))
854 val
= simple_strtoul(buf
, &endp
, 0);
855 if (endp
== buf
|| val
> 10000000)
859 ret
= t3_config_sched(adap
, val
, sched
);
866 #define TM_ATTR(name, sched) \
867 static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
870 return tm_attr_show(d, buf, sched); \
872 static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
873 const char *buf, size_t len) \
875 return tm_attr_store(d, buf, len, sched); \
877 static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
888 static struct attribute
*offload_attrs
[] = {
889 &dev_attr_sched0
.attr
,
890 &dev_attr_sched1
.attr
,
891 &dev_attr_sched2
.attr
,
892 &dev_attr_sched3
.attr
,
893 &dev_attr_sched4
.attr
,
894 &dev_attr_sched5
.attr
,
895 &dev_attr_sched6
.attr
,
896 &dev_attr_sched7
.attr
,
900 static struct attribute_group offload_attr_group
= {.attrs
= offload_attrs
};
903 * Sends an sk_buff to an offload queue driver
904 * after dealing with any active network taps.
906 static inline int offload_tx(struct t3cdev
*tdev
, struct sk_buff
*skb
)
911 ret
= t3_offload_tx(tdev
, skb
);
916 static int write_smt_entry(struct adapter
*adapter
, int idx
)
918 struct cpl_smt_write_req
*req
;
919 struct port_info
*pi
= netdev_priv(adapter
->port
[idx
]);
920 struct sk_buff
*skb
= alloc_skb(sizeof(*req
), GFP_KERNEL
);
925 req
= (struct cpl_smt_write_req
*)__skb_put(skb
, sizeof(*req
));
926 req
->wr
.wr_hi
= htonl(V_WR_OP(FW_WROPCODE_FORWARD
));
927 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ
, idx
));
928 req
->mtu_idx
= NMTUS
- 1; /* should be 0 but there's a T3 bug */
930 memcpy(req
->src_mac0
, adapter
->port
[idx
]->dev_addr
, ETH_ALEN
);
931 memcpy(req
->src_mac1
, pi
->iscsic
.mac_addr
, ETH_ALEN
);
933 offload_tx(&adapter
->tdev
, skb
);
937 static int init_smt(struct adapter
*adapter
)
941 for_each_port(adapter
, i
)
942 write_smt_entry(adapter
, i
);
946 static void init_port_mtus(struct adapter
*adapter
)
948 unsigned int mtus
= adapter
->port
[0]->mtu
;
950 if (adapter
->port
[1])
951 mtus
|= adapter
->port
[1]->mtu
<< 16;
952 t3_write_reg(adapter
, A_TP_MTU_PORT_TABLE
, mtus
);
955 static int send_pktsched_cmd(struct adapter
*adap
, int sched
, int qidx
, int lo
,
959 struct mngt_pktsched_wr
*req
;
962 skb
= alloc_skb(sizeof(*req
), GFP_KERNEL
);
964 skb
= adap
->nofail_skb
;
968 req
= (struct mngt_pktsched_wr
*)skb_put(skb
, sizeof(*req
));
969 req
->wr_hi
= htonl(V_WR_OP(FW_WROPCODE_MNGT
));
970 req
->mngt_opcode
= FW_MNGTOPCODE_PKTSCHED_SET
;
976 ret
= t3_mgmt_tx(adap
, skb
);
977 if (skb
== adap
->nofail_skb
) {
978 adap
->nofail_skb
= alloc_skb(sizeof(struct cpl_set_tcb_field
),
980 if (!adap
->nofail_skb
)
987 static int bind_qsets(struct adapter
*adap
)
991 for_each_port(adap
, i
) {
992 const struct port_info
*pi
= adap2pinfo(adap
, i
);
994 for (j
= 0; j
< pi
->nqsets
; ++j
) {
995 int ret
= send_pktsched_cmd(adap
, 1,
996 pi
->first_qset
+ j
, -1,
1006 #define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
1007 __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
1008 #define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
1009 #define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
1010 __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
1011 #define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
1012 #define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
1013 #define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
1014 #define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
1015 MODULE_FIRMWARE(FW_FNAME
);
1016 MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION
".bin");
1017 MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION
".bin");
1018 MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME
);
1019 MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME
);
1020 MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME
);
1022 static inline const char *get_edc_fw_name(int edc_idx
)
1024 const char *fw_name
= NULL
;
1027 case EDC_OPT_AEL2005
:
1028 fw_name
= AEL2005_OPT_EDC_NAME
;
1030 case EDC_TWX_AEL2005
:
1031 fw_name
= AEL2005_TWX_EDC_NAME
;
1033 case EDC_TWX_AEL2020
:
1034 fw_name
= AEL2020_TWX_EDC_NAME
;
1040 int t3_get_edc_fw(struct cphy
*phy
, int edc_idx
, int size
)
1042 struct adapter
*adapter
= phy
->adapter
;
1043 const struct firmware
*fw
;
1047 u16
*cache
= phy
->phy_cache
;
1050 snprintf(buf
, sizeof(buf
), get_edc_fw_name(edc_idx
));
1052 ret
= request_firmware(&fw
, buf
, &adapter
->pdev
->dev
);
1054 dev_err(&adapter
->pdev
->dev
,
1055 "could not upgrade firmware: unable to load %s\n",
1060 /* check size, take checksum in account */
1061 if (fw
->size
> size
+ 4) {
1062 CH_ERR(adapter
, "firmware image too large %u, expected %d\n",
1063 (unsigned int)fw
->size
, size
+ 4);
1067 /* compute checksum */
1068 p
= (const __be32
*)fw
->data
;
1069 for (csum
= 0, i
= 0; i
< fw
->size
/ sizeof(csum
); i
++)
1070 csum
+= ntohl(p
[i
]);
1072 if (csum
!= 0xffffffff) {
1073 CH_ERR(adapter
, "corrupted firmware image, checksum %u\n",
1078 for (i
= 0; i
< size
/ 4 ; i
++) {
1079 *cache
++ = (be32_to_cpu(p
[i
]) & 0xffff0000) >> 16;
1080 *cache
++ = be32_to_cpu(p
[i
]) & 0xffff;
1083 release_firmware(fw
);
1088 static int upgrade_fw(struct adapter
*adap
)
1091 const struct firmware
*fw
;
1092 struct device
*dev
= &adap
->pdev
->dev
;
1094 ret
= request_firmware(&fw
, FW_FNAME
, dev
);
1096 dev_err(dev
, "could not upgrade firmware: unable to load %s\n",
1100 ret
= t3_load_fw(adap
, fw
->data
, fw
->size
);
1101 release_firmware(fw
);
1104 dev_info(dev
, "successful upgrade to firmware %d.%d.%d\n",
1105 FW_VERSION_MAJOR
, FW_VERSION_MINOR
, FW_VERSION_MICRO
);
1107 dev_err(dev
, "failed to upgrade to firmware %d.%d.%d\n",
1108 FW_VERSION_MAJOR
, FW_VERSION_MINOR
, FW_VERSION_MICRO
);
1113 static inline char t3rev2char(struct adapter
*adapter
)
1117 switch(adapter
->params
.rev
) {
1129 static int update_tpsram(struct adapter
*adap
)
1131 const struct firmware
*tpsram
;
1133 struct device
*dev
= &adap
->pdev
->dev
;
1137 rev
= t3rev2char(adap
);
1141 snprintf(buf
, sizeof(buf
), TPSRAM_NAME
, rev
);
1143 ret
= request_firmware(&tpsram
, buf
, dev
);
1145 dev_err(dev
, "could not load TP SRAM: unable to load %s\n",
1150 ret
= t3_check_tpsram(adap
, tpsram
->data
, tpsram
->size
);
1152 goto release_tpsram
;
1154 ret
= t3_set_proto_sram(adap
, tpsram
->data
);
1157 "successful update of protocol engine "
1159 TP_VERSION_MAJOR
, TP_VERSION_MINOR
, TP_VERSION_MICRO
);
1161 dev_err(dev
, "failed to update of protocol engine %d.%d.%d\n",
1162 TP_VERSION_MAJOR
, TP_VERSION_MINOR
, TP_VERSION_MICRO
);
1164 dev_err(dev
, "loading protocol SRAM failed\n");
1167 release_firmware(tpsram
);
1173 * cxgb_up - enable the adapter
1174 * @adapter: adapter being enabled
1176 * Called when the first port is enabled, this function performs the
1177 * actions necessary to make an adapter operational, such as completing
1178 * the initialization of HW modules, and enabling interrupts.
1180 * Must be called with the rtnl lock held.
1182 static int cxgb_up(struct adapter
*adap
)
1186 if (!(adap
->flags
& FULL_INIT_DONE
)) {
1187 err
= t3_check_fw_version(adap
);
1188 if (err
== -EINVAL
) {
1189 err
= upgrade_fw(adap
);
1190 CH_WARN(adap
, "FW upgrade to %d.%d.%d %s\n",
1191 FW_VERSION_MAJOR
, FW_VERSION_MINOR
,
1192 FW_VERSION_MICRO
, err
? "failed" : "succeeded");
1195 err
= t3_check_tpsram_version(adap
);
1196 if (err
== -EINVAL
) {
1197 err
= update_tpsram(adap
);
1198 CH_WARN(adap
, "TP upgrade to %d.%d.%d %s\n",
1199 TP_VERSION_MAJOR
, TP_VERSION_MINOR
,
1200 TP_VERSION_MICRO
, err
? "failed" : "succeeded");
1204 * Clear interrupts now to catch errors if t3_init_hw fails.
1205 * We clear them again later as initialization may trigger
1206 * conditions that can interrupt.
1208 t3_intr_clear(adap
);
1210 err
= t3_init_hw(adap
, 0);
1214 t3_set_reg_field(adap
, A_TP_PARA_REG5
, 0, F_RXDDPOFFINIT
);
1215 t3_write_reg(adap
, A_ULPRX_TDDP_PSZ
, V_HPZ0(PAGE_SHIFT
- 12));
1217 err
= setup_sge_qsets(adap
);
1222 if (!(adap
->flags
& NAPI_INIT
))
1225 t3_start_sge_timers(adap
);
1226 adap
->flags
|= FULL_INIT_DONE
;
1229 t3_intr_clear(adap
);
1231 if (adap
->flags
& USING_MSIX
) {
1232 name_msix_vecs(adap
);
1233 err
= request_irq(adap
->msix_info
[0].vec
,
1234 t3_async_intr_handler
, 0,
1235 adap
->msix_info
[0].desc
, adap
);
1239 err
= request_msix_data_irqs(adap
);
1241 free_irq(adap
->msix_info
[0].vec
, adap
);
1244 } else if ((err
= request_irq(adap
->pdev
->irq
,
1245 t3_intr_handler(adap
,
1246 adap
->sge
.qs
[0].rspq
.
1248 (adap
->flags
& USING_MSI
) ?
1253 enable_all_napi(adap
);
1255 t3_intr_enable(adap
);
1257 if (adap
->params
.rev
>= T3_REV_C
&& !(adap
->flags
& TP_PARITY_INIT
) &&
1258 is_offload(adap
) && init_tp_parity(adap
) == 0)
1259 adap
->flags
|= TP_PARITY_INIT
;
1261 if (adap
->flags
& TP_PARITY_INIT
) {
1262 t3_write_reg(adap
, A_TP_INT_CAUSE
,
1263 F_CMCACHEPERR
| F_ARPLUTPERR
);
1264 t3_write_reg(adap
, A_TP_INT_ENABLE
, 0x7fbfffff);
1267 if (!(adap
->flags
& QUEUES_BOUND
)) {
1268 err
= bind_qsets(adap
);
1270 CH_ERR(adap
, "failed to bind qsets, err %d\n", err
);
1271 t3_intr_disable(adap
);
1272 free_irq_resources(adap
);
1275 adap
->flags
|= QUEUES_BOUND
;
1281 CH_ERR(adap
, "request_irq failed, err %d\n", err
);
1286 * Release resources when all the ports and offloading have been stopped.
1288 static void cxgb_down(struct adapter
*adapter
)
1290 t3_sge_stop(adapter
);
1291 spin_lock_irq(&adapter
->work_lock
); /* sync with PHY intr task */
1292 t3_intr_disable(adapter
);
1293 spin_unlock_irq(&adapter
->work_lock
);
1295 free_irq_resources(adapter
);
1296 quiesce_rx(adapter
);
1297 t3_sge_stop(adapter
);
1298 flush_workqueue(cxgb3_wq
); /* wait for external IRQ handler */
1301 static void schedule_chk_task(struct adapter
*adap
)
1305 timeo
= adap
->params
.linkpoll_period
?
1306 (HZ
* adap
->params
.linkpoll_period
) / 10 :
1307 adap
->params
.stats_update_period
* HZ
;
1309 queue_delayed_work(cxgb3_wq
, &adap
->adap_check_task
, timeo
);
1312 static int offload_open(struct net_device
*dev
)
1314 struct port_info
*pi
= netdev_priv(dev
);
1315 struct adapter
*adapter
= pi
->adapter
;
1316 struct t3cdev
*tdev
= dev2t3cdev(dev
);
1317 int adap_up
= adapter
->open_device_map
& PORT_MASK
;
1320 if (test_and_set_bit(OFFLOAD_DEVMAP_BIT
, &adapter
->open_device_map
))
1323 if (!adap_up
&& (err
= cxgb_up(adapter
)) < 0)
1326 t3_tp_set_offload_mode(adapter
, 1);
1327 tdev
->lldev
= adapter
->port
[0];
1328 err
= cxgb3_offload_activate(adapter
);
1332 init_port_mtus(adapter
);
1333 t3_load_mtus(adapter
, adapter
->params
.mtus
, adapter
->params
.a_wnd
,
1334 adapter
->params
.b_wnd
,
1335 adapter
->params
.rev
== 0 ?
1336 adapter
->port
[0]->mtu
: 0xffff);
1339 if (sysfs_create_group(&tdev
->lldev
->dev
.kobj
, &offload_attr_group
))
1340 dev_dbg(&dev
->dev
, "cannot create sysfs group\n");
1342 /* Call back all registered clients */
1343 cxgb3_add_clients(tdev
);
1346 /* restore them in case the offload module has changed them */
1348 t3_tp_set_offload_mode(adapter
, 0);
1349 clear_bit(OFFLOAD_DEVMAP_BIT
, &adapter
->open_device_map
);
1350 cxgb3_set_dummy_ops(tdev
);
1355 static int offload_close(struct t3cdev
*tdev
)
1357 struct adapter
*adapter
= tdev2adap(tdev
);
1359 if (!test_bit(OFFLOAD_DEVMAP_BIT
, &adapter
->open_device_map
))
1362 /* Call back all registered clients */
1363 cxgb3_remove_clients(tdev
);
1365 sysfs_remove_group(&tdev
->lldev
->dev
.kobj
, &offload_attr_group
);
1367 /* Flush work scheduled while releasing TIDs */
1368 flush_scheduled_work();
1371 cxgb3_set_dummy_ops(tdev
);
1372 t3_tp_set_offload_mode(adapter
, 0);
1373 clear_bit(OFFLOAD_DEVMAP_BIT
, &adapter
->open_device_map
);
1375 if (!adapter
->open_device_map
)
1378 cxgb3_offload_deactivate(adapter
);
1382 static int cxgb_open(struct net_device
*dev
)
1384 struct port_info
*pi
= netdev_priv(dev
);
1385 struct adapter
*adapter
= pi
->adapter
;
1386 int other_ports
= adapter
->open_device_map
& PORT_MASK
;
1389 if (!adapter
->open_device_map
&& (err
= cxgb_up(adapter
)) < 0)
1392 set_bit(pi
->port_id
, &adapter
->open_device_map
);
1393 if (is_offload(adapter
) && !ofld_disable
) {
1394 err
= offload_open(dev
);
1397 "Could not initialize offload capabilities\n");
1400 dev
->real_num_tx_queues
= pi
->nqsets
;
1402 t3_port_intr_enable(adapter
, pi
->port_id
);
1403 netif_tx_start_all_queues(dev
);
1405 schedule_chk_task(adapter
);
1407 cxgb3_event_notify(&adapter
->tdev
, OFFLOAD_PORT_UP
, pi
->port_id
);
1411 static int cxgb_close(struct net_device
*dev
)
1413 struct port_info
*pi
= netdev_priv(dev
);
1414 struct adapter
*adapter
= pi
->adapter
;
1417 if (!adapter
->open_device_map
)
1420 /* Stop link fault interrupts */
1421 t3_xgm_intr_disable(adapter
, pi
->port_id
);
1422 t3_read_reg(adapter
, A_XGM_INT_STATUS
+ pi
->mac
.offset
);
1424 t3_port_intr_disable(adapter
, pi
->port_id
);
1425 netif_tx_stop_all_queues(dev
);
1426 pi
->phy
.ops
->power_down(&pi
->phy
, 1);
1427 netif_carrier_off(dev
);
1428 t3_mac_disable(&pi
->mac
, MAC_DIRECTION_TX
| MAC_DIRECTION_RX
);
1430 spin_lock_irq(&adapter
->work_lock
); /* sync with update task */
1431 clear_bit(pi
->port_id
, &adapter
->open_device_map
);
1432 spin_unlock_irq(&adapter
->work_lock
);
1434 if (!(adapter
->open_device_map
& PORT_MASK
))
1435 cancel_delayed_work_sync(&adapter
->adap_check_task
);
1437 if (!adapter
->open_device_map
)
1440 cxgb3_event_notify(&adapter
->tdev
, OFFLOAD_PORT_DOWN
, pi
->port_id
);
1444 static struct net_device_stats
*cxgb_get_stats(struct net_device
*dev
)
1446 struct port_info
*pi
= netdev_priv(dev
);
1447 struct adapter
*adapter
= pi
->adapter
;
1448 struct net_device_stats
*ns
= &pi
->netstats
;
1449 const struct mac_stats
*pstats
;
1451 spin_lock(&adapter
->stats_lock
);
1452 pstats
= t3_mac_update_stats(&pi
->mac
);
1453 spin_unlock(&adapter
->stats_lock
);
1455 ns
->tx_bytes
= pstats
->tx_octets
;
1456 ns
->tx_packets
= pstats
->tx_frames
;
1457 ns
->rx_bytes
= pstats
->rx_octets
;
1458 ns
->rx_packets
= pstats
->rx_frames
;
1459 ns
->multicast
= pstats
->rx_mcast_frames
;
1461 ns
->tx_errors
= pstats
->tx_underrun
;
1462 ns
->rx_errors
= pstats
->rx_symbol_errs
+ pstats
->rx_fcs_errs
+
1463 pstats
->rx_too_long
+ pstats
->rx_jabber
+ pstats
->rx_short
+
1464 pstats
->rx_fifo_ovfl
;
1466 /* detailed rx_errors */
1467 ns
->rx_length_errors
= pstats
->rx_jabber
+ pstats
->rx_too_long
;
1468 ns
->rx_over_errors
= 0;
1469 ns
->rx_crc_errors
= pstats
->rx_fcs_errs
;
1470 ns
->rx_frame_errors
= pstats
->rx_symbol_errs
;
1471 ns
->rx_fifo_errors
= pstats
->rx_fifo_ovfl
;
1472 ns
->rx_missed_errors
= pstats
->rx_cong_drops
;
1474 /* detailed tx_errors */
1475 ns
->tx_aborted_errors
= 0;
1476 ns
->tx_carrier_errors
= 0;
1477 ns
->tx_fifo_errors
= pstats
->tx_underrun
;
1478 ns
->tx_heartbeat_errors
= 0;
1479 ns
->tx_window_errors
= 0;
1483 static u32
get_msglevel(struct net_device
*dev
)
1485 struct port_info
*pi
= netdev_priv(dev
);
1486 struct adapter
*adapter
= pi
->adapter
;
1488 return adapter
->msg_enable
;
1491 static void set_msglevel(struct net_device
*dev
, u32 val
)
1493 struct port_info
*pi
= netdev_priv(dev
);
1494 struct adapter
*adapter
= pi
->adapter
;
1496 adapter
->msg_enable
= val
;
1499 static char stats_strings
[][ETH_GSTRING_LEN
] = {
1502 "TxMulticastFramesOK",
1503 "TxBroadcastFramesOK",
1510 "TxFrames128To255 ",
1511 "TxFrames256To511 ",
1512 "TxFrames512To1023 ",
1513 "TxFrames1024To1518 ",
1514 "TxFrames1519ToMax ",
1518 "RxMulticastFramesOK",
1519 "RxBroadcastFramesOK",
1530 "RxFrames128To255 ",
1531 "RxFrames256To511 ",
1532 "RxFrames512To1023 ",
1533 "RxFrames1024To1518 ",
1534 "RxFrames1519ToMax ",
1547 "CheckTXEnToggled ",
1553 static int get_sset_count(struct net_device
*dev
, int sset
)
1557 return ARRAY_SIZE(stats_strings
);
1563 #define T3_REGMAP_SIZE (3 * 1024)
1565 static int get_regs_len(struct net_device
*dev
)
1567 return T3_REGMAP_SIZE
;
1570 static int get_eeprom_len(struct net_device
*dev
)
1575 static void get_drvinfo(struct net_device
*dev
, struct ethtool_drvinfo
*info
)
1577 struct port_info
*pi
= netdev_priv(dev
);
1578 struct adapter
*adapter
= pi
->adapter
;
1582 spin_lock(&adapter
->stats_lock
);
1583 t3_get_fw_version(adapter
, &fw_vers
);
1584 t3_get_tp_version(adapter
, &tp_vers
);
1585 spin_unlock(&adapter
->stats_lock
);
1587 strcpy(info
->driver
, DRV_NAME
);
1588 strcpy(info
->version
, DRV_VERSION
);
1589 strcpy(info
->bus_info
, pci_name(adapter
->pdev
));
1591 strcpy(info
->fw_version
, "N/A");
1593 snprintf(info
->fw_version
, sizeof(info
->fw_version
),
1594 "%s %u.%u.%u TP %u.%u.%u",
1595 G_FW_VERSION_TYPE(fw_vers
) ? "T" : "N",
1596 G_FW_VERSION_MAJOR(fw_vers
),
1597 G_FW_VERSION_MINOR(fw_vers
),
1598 G_FW_VERSION_MICRO(fw_vers
),
1599 G_TP_VERSION_MAJOR(tp_vers
),
1600 G_TP_VERSION_MINOR(tp_vers
),
1601 G_TP_VERSION_MICRO(tp_vers
));
1605 static void get_strings(struct net_device
*dev
, u32 stringset
, u8
* data
)
1607 if (stringset
== ETH_SS_STATS
)
1608 memcpy(data
, stats_strings
, sizeof(stats_strings
));
1611 static unsigned long collect_sge_port_stats(struct adapter
*adapter
,
1612 struct port_info
*p
, int idx
)
1615 unsigned long tot
= 0;
1617 for (i
= p
->first_qset
; i
< p
->first_qset
+ p
->nqsets
; ++i
)
1618 tot
+= adapter
->sge
.qs
[i
].port_stats
[idx
];
1622 static void get_stats(struct net_device
*dev
, struct ethtool_stats
*stats
,
1625 struct port_info
*pi
= netdev_priv(dev
);
1626 struct adapter
*adapter
= pi
->adapter
;
1627 const struct mac_stats
*s
;
1629 spin_lock(&adapter
->stats_lock
);
1630 s
= t3_mac_update_stats(&pi
->mac
);
1631 spin_unlock(&adapter
->stats_lock
);
1633 *data
++ = s
->tx_octets
;
1634 *data
++ = s
->tx_frames
;
1635 *data
++ = s
->tx_mcast_frames
;
1636 *data
++ = s
->tx_bcast_frames
;
1637 *data
++ = s
->tx_pause
;
1638 *data
++ = s
->tx_underrun
;
1639 *data
++ = s
->tx_fifo_urun
;
1641 *data
++ = s
->tx_frames_64
;
1642 *data
++ = s
->tx_frames_65_127
;
1643 *data
++ = s
->tx_frames_128_255
;
1644 *data
++ = s
->tx_frames_256_511
;
1645 *data
++ = s
->tx_frames_512_1023
;
1646 *data
++ = s
->tx_frames_1024_1518
;
1647 *data
++ = s
->tx_frames_1519_max
;
1649 *data
++ = s
->rx_octets
;
1650 *data
++ = s
->rx_frames
;
1651 *data
++ = s
->rx_mcast_frames
;
1652 *data
++ = s
->rx_bcast_frames
;
1653 *data
++ = s
->rx_pause
;
1654 *data
++ = s
->rx_fcs_errs
;
1655 *data
++ = s
->rx_symbol_errs
;
1656 *data
++ = s
->rx_short
;
1657 *data
++ = s
->rx_jabber
;
1658 *data
++ = s
->rx_too_long
;
1659 *data
++ = s
->rx_fifo_ovfl
;
1661 *data
++ = s
->rx_frames_64
;
1662 *data
++ = s
->rx_frames_65_127
;
1663 *data
++ = s
->rx_frames_128_255
;
1664 *data
++ = s
->rx_frames_256_511
;
1665 *data
++ = s
->rx_frames_512_1023
;
1666 *data
++ = s
->rx_frames_1024_1518
;
1667 *data
++ = s
->rx_frames_1519_max
;
1669 *data
++ = pi
->phy
.fifo_errors
;
1671 *data
++ = collect_sge_port_stats(adapter
, pi
, SGE_PSTAT_TSO
);
1672 *data
++ = collect_sge_port_stats(adapter
, pi
, SGE_PSTAT_VLANEX
);
1673 *data
++ = collect_sge_port_stats(adapter
, pi
, SGE_PSTAT_VLANINS
);
1674 *data
++ = collect_sge_port_stats(adapter
, pi
, SGE_PSTAT_TX_CSUM
);
1675 *data
++ = collect_sge_port_stats(adapter
, pi
, SGE_PSTAT_RX_CSUM_GOOD
);
1679 *data
++ = s
->rx_cong_drops
;
1681 *data
++ = s
->num_toggled
;
1682 *data
++ = s
->num_resets
;
1684 *data
++ = s
->link_faults
;
1687 static inline void reg_block_dump(struct adapter
*ap
, void *buf
,
1688 unsigned int start
, unsigned int end
)
1690 u32
*p
= buf
+ start
;
1692 for (; start
<= end
; start
+= sizeof(u32
))
1693 *p
++ = t3_read_reg(ap
, start
);
1696 static void get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
1699 struct port_info
*pi
= netdev_priv(dev
);
1700 struct adapter
*ap
= pi
->adapter
;
1704 * bits 0..9: chip version
1705 * bits 10..15: chip revision
1706 * bit 31: set for PCIe cards
1708 regs
->version
= 3 | (ap
->params
.rev
<< 10) | (is_pcie(ap
) << 31);
1711 * We skip the MAC statistics registers because they are clear-on-read.
1712 * Also reading multi-register stats would need to synchronize with the
1713 * periodic mac stats accumulation. Hard to justify the complexity.
1715 memset(buf
, 0, T3_REGMAP_SIZE
);
1716 reg_block_dump(ap
, buf
, 0, A_SG_RSPQ_CREDIT_RETURN
);
1717 reg_block_dump(ap
, buf
, A_SG_HI_DRB_HI_THRSH
, A_ULPRX_PBL_ULIMIT
);
1718 reg_block_dump(ap
, buf
, A_ULPTX_CONFIG
, A_MPS_INT_CAUSE
);
1719 reg_block_dump(ap
, buf
, A_CPL_SWITCH_CNTRL
, A_CPL_MAP_TBL_DATA
);
1720 reg_block_dump(ap
, buf
, A_SMB_GLOBAL_TIME_CFG
, A_XGM_SERDES_STAT3
);
1721 reg_block_dump(ap
, buf
, A_XGM_SERDES_STATUS0
,
1722 XGM_REG(A_XGM_SERDES_STAT3
, 1));
1723 reg_block_dump(ap
, buf
, XGM_REG(A_XGM_SERDES_STATUS0
, 1),
1724 XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT
, 1));
1727 static int restart_autoneg(struct net_device
*dev
)
1729 struct port_info
*p
= netdev_priv(dev
);
1731 if (!netif_running(dev
))
1733 if (p
->link_config
.autoneg
!= AUTONEG_ENABLE
)
1735 p
->phy
.ops
->autoneg_restart(&p
->phy
);
1739 static int cxgb3_phys_id(struct net_device
*dev
, u32 data
)
1741 struct port_info
*pi
= netdev_priv(dev
);
1742 struct adapter
*adapter
= pi
->adapter
;
1748 for (i
= 0; i
< data
* 2; i
++) {
1749 t3_set_reg_field(adapter
, A_T3DBG_GPIO_EN
, F_GPIO0_OUT_VAL
,
1750 (i
& 1) ? F_GPIO0_OUT_VAL
: 0);
1751 if (msleep_interruptible(500))
1754 t3_set_reg_field(adapter
, A_T3DBG_GPIO_EN
, F_GPIO0_OUT_VAL
,
1759 static int get_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1761 struct port_info
*p
= netdev_priv(dev
);
1763 cmd
->supported
= p
->link_config
.supported
;
1764 cmd
->advertising
= p
->link_config
.advertising
;
1766 if (netif_carrier_ok(dev
)) {
1767 cmd
->speed
= p
->link_config
.speed
;
1768 cmd
->duplex
= p
->link_config
.duplex
;
1774 cmd
->port
= (cmd
->supported
& SUPPORTED_TP
) ? PORT_TP
: PORT_FIBRE
;
1775 cmd
->phy_address
= p
->phy
.mdio
.prtad
;
1776 cmd
->transceiver
= XCVR_EXTERNAL
;
1777 cmd
->autoneg
= p
->link_config
.autoneg
;
1783 static int speed_duplex_to_caps(int speed
, int duplex
)
1789 if (duplex
== DUPLEX_FULL
)
1790 cap
= SUPPORTED_10baseT_Full
;
1792 cap
= SUPPORTED_10baseT_Half
;
1795 if (duplex
== DUPLEX_FULL
)
1796 cap
= SUPPORTED_100baseT_Full
;
1798 cap
= SUPPORTED_100baseT_Half
;
1801 if (duplex
== DUPLEX_FULL
)
1802 cap
= SUPPORTED_1000baseT_Full
;
1804 cap
= SUPPORTED_1000baseT_Half
;
1807 if (duplex
== DUPLEX_FULL
)
1808 cap
= SUPPORTED_10000baseT_Full
;
1813 #define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1814 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1815 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
1816 ADVERTISED_10000baseT_Full)
1818 static int set_settings(struct net_device
*dev
, struct ethtool_cmd
*cmd
)
1820 struct port_info
*p
= netdev_priv(dev
);
1821 struct link_config
*lc
= &p
->link_config
;
1823 if (!(lc
->supported
& SUPPORTED_Autoneg
)) {
1825 * PHY offers a single speed/duplex. See if that's what's
1828 if (cmd
->autoneg
== AUTONEG_DISABLE
) {
1829 int cap
= speed_duplex_to_caps(cmd
->speed
, cmd
->duplex
);
1830 if (lc
->supported
& cap
)
1836 if (cmd
->autoneg
== AUTONEG_DISABLE
) {
1837 int cap
= speed_duplex_to_caps(cmd
->speed
, cmd
->duplex
);
1839 if (!(lc
->supported
& cap
) || cmd
->speed
== SPEED_1000
)
1841 lc
->requested_speed
= cmd
->speed
;
1842 lc
->requested_duplex
= cmd
->duplex
;
1843 lc
->advertising
= 0;
1845 cmd
->advertising
&= ADVERTISED_MASK
;
1846 cmd
->advertising
&= lc
->supported
;
1847 if (!cmd
->advertising
)
1849 lc
->requested_speed
= SPEED_INVALID
;
1850 lc
->requested_duplex
= DUPLEX_INVALID
;
1851 lc
->advertising
= cmd
->advertising
| ADVERTISED_Autoneg
;
1853 lc
->autoneg
= cmd
->autoneg
;
1854 if (netif_running(dev
))
1855 t3_link_start(&p
->phy
, &p
->mac
, lc
);
1859 static void get_pauseparam(struct net_device
*dev
,
1860 struct ethtool_pauseparam
*epause
)
1862 struct port_info
*p
= netdev_priv(dev
);
1864 epause
->autoneg
= (p
->link_config
.requested_fc
& PAUSE_AUTONEG
) != 0;
1865 epause
->rx_pause
= (p
->link_config
.fc
& PAUSE_RX
) != 0;
1866 epause
->tx_pause
= (p
->link_config
.fc
& PAUSE_TX
) != 0;
1869 static int set_pauseparam(struct net_device
*dev
,
1870 struct ethtool_pauseparam
*epause
)
1872 struct port_info
*p
= netdev_priv(dev
);
1873 struct link_config
*lc
= &p
->link_config
;
1875 if (epause
->autoneg
== AUTONEG_DISABLE
)
1876 lc
->requested_fc
= 0;
1877 else if (lc
->supported
& SUPPORTED_Autoneg
)
1878 lc
->requested_fc
= PAUSE_AUTONEG
;
1882 if (epause
->rx_pause
)
1883 lc
->requested_fc
|= PAUSE_RX
;
1884 if (epause
->tx_pause
)
1885 lc
->requested_fc
|= PAUSE_TX
;
1886 if (lc
->autoneg
== AUTONEG_ENABLE
) {
1887 if (netif_running(dev
))
1888 t3_link_start(&p
->phy
, &p
->mac
, lc
);
1890 lc
->fc
= lc
->requested_fc
& (PAUSE_RX
| PAUSE_TX
);
1891 if (netif_running(dev
))
1892 t3_mac_set_speed_duplex_fc(&p
->mac
, -1, -1, lc
->fc
);
1897 static u32
get_rx_csum(struct net_device
*dev
)
1899 struct port_info
*p
= netdev_priv(dev
);
1901 return p
->rx_offload
& T3_RX_CSUM
;
1904 static int set_rx_csum(struct net_device
*dev
, u32 data
)
1906 struct port_info
*p
= netdev_priv(dev
);
1909 p
->rx_offload
|= T3_RX_CSUM
;
1913 p
->rx_offload
&= ~(T3_RX_CSUM
| T3_LRO
);
1914 for (i
= p
->first_qset
; i
< p
->first_qset
+ p
->nqsets
; i
++)
1915 set_qset_lro(dev
, i
, 0);
1920 static void get_sge_param(struct net_device
*dev
, struct ethtool_ringparam
*e
)
1922 struct port_info
*pi
= netdev_priv(dev
);
1923 struct adapter
*adapter
= pi
->adapter
;
1924 const struct qset_params
*q
= &adapter
->params
.sge
.qset
[pi
->first_qset
];
1926 e
->rx_max_pending
= MAX_RX_BUFFERS
;
1927 e
->rx_mini_max_pending
= 0;
1928 e
->rx_jumbo_max_pending
= MAX_RX_JUMBO_BUFFERS
;
1929 e
->tx_max_pending
= MAX_TXQ_ENTRIES
;
1931 e
->rx_pending
= q
->fl_size
;
1932 e
->rx_mini_pending
= q
->rspq_size
;
1933 e
->rx_jumbo_pending
= q
->jumbo_size
;
1934 e
->tx_pending
= q
->txq_size
[0];
1937 static int set_sge_param(struct net_device
*dev
, struct ethtool_ringparam
*e
)
1939 struct port_info
*pi
= netdev_priv(dev
);
1940 struct adapter
*adapter
= pi
->adapter
;
1941 struct qset_params
*q
;
1944 if (e
->rx_pending
> MAX_RX_BUFFERS
||
1945 e
->rx_jumbo_pending
> MAX_RX_JUMBO_BUFFERS
||
1946 e
->tx_pending
> MAX_TXQ_ENTRIES
||
1947 e
->rx_mini_pending
> MAX_RSPQ_ENTRIES
||
1948 e
->rx_mini_pending
< MIN_RSPQ_ENTRIES
||
1949 e
->rx_pending
< MIN_FL_ENTRIES
||
1950 e
->rx_jumbo_pending
< MIN_FL_ENTRIES
||
1951 e
->tx_pending
< adapter
->params
.nports
* MIN_TXQ_ENTRIES
)
1954 if (adapter
->flags
& FULL_INIT_DONE
)
1957 q
= &adapter
->params
.sge
.qset
[pi
->first_qset
];
1958 for (i
= 0; i
< pi
->nqsets
; ++i
, ++q
) {
1959 q
->rspq_size
= e
->rx_mini_pending
;
1960 q
->fl_size
= e
->rx_pending
;
1961 q
->jumbo_size
= e
->rx_jumbo_pending
;
1962 q
->txq_size
[0] = e
->tx_pending
;
1963 q
->txq_size
[1] = e
->tx_pending
;
1964 q
->txq_size
[2] = e
->tx_pending
;
1969 static int set_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*c
)
1971 struct port_info
*pi
= netdev_priv(dev
);
1972 struct adapter
*adapter
= pi
->adapter
;
1973 struct qset_params
*qsp
= &adapter
->params
.sge
.qset
[0];
1974 struct sge_qset
*qs
= &adapter
->sge
.qs
[0];
1976 if (c
->rx_coalesce_usecs
* 10 > M_NEWTIMER
)
1979 qsp
->coalesce_usecs
= c
->rx_coalesce_usecs
;
1980 t3_update_qset_coalesce(qs
, qsp
);
1984 static int get_coalesce(struct net_device
*dev
, struct ethtool_coalesce
*c
)
1986 struct port_info
*pi
= netdev_priv(dev
);
1987 struct adapter
*adapter
= pi
->adapter
;
1988 struct qset_params
*q
= adapter
->params
.sge
.qset
;
1990 c
->rx_coalesce_usecs
= q
->coalesce_usecs
;
1994 static int get_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*e
,
1997 struct port_info
*pi
= netdev_priv(dev
);
1998 struct adapter
*adapter
= pi
->adapter
;
2001 u8
*buf
= kmalloc(EEPROMSIZE
, GFP_KERNEL
);
2005 e
->magic
= EEPROM_MAGIC
;
2006 for (i
= e
->offset
& ~3; !err
&& i
< e
->offset
+ e
->len
; i
+= 4)
2007 err
= t3_seeprom_read(adapter
, i
, (__le32
*) & buf
[i
]);
2010 memcpy(data
, buf
+ e
->offset
, e
->len
);
2015 static int set_eeprom(struct net_device
*dev
, struct ethtool_eeprom
*eeprom
,
2018 struct port_info
*pi
= netdev_priv(dev
);
2019 struct adapter
*adapter
= pi
->adapter
;
2020 u32 aligned_offset
, aligned_len
;
2025 if (eeprom
->magic
!= EEPROM_MAGIC
)
2028 aligned_offset
= eeprom
->offset
& ~3;
2029 aligned_len
= (eeprom
->len
+ (eeprom
->offset
& 3) + 3) & ~3;
2031 if (aligned_offset
!= eeprom
->offset
|| aligned_len
!= eeprom
->len
) {
2032 buf
= kmalloc(aligned_len
, GFP_KERNEL
);
2035 err
= t3_seeprom_read(adapter
, aligned_offset
, (__le32
*) buf
);
2036 if (!err
&& aligned_len
> 4)
2037 err
= t3_seeprom_read(adapter
,
2038 aligned_offset
+ aligned_len
- 4,
2039 (__le32
*) & buf
[aligned_len
- 4]);
2042 memcpy(buf
+ (eeprom
->offset
& 3), data
, eeprom
->len
);
2046 err
= t3_seeprom_wp(adapter
, 0);
2050 for (p
= (__le32
*) buf
; !err
&& aligned_len
; aligned_len
-= 4, p
++) {
2051 err
= t3_seeprom_write(adapter
, aligned_offset
, *p
);
2052 aligned_offset
+= 4;
2056 err
= t3_seeprom_wp(adapter
, 1);
2063 static void get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
2067 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
2070 static const struct ethtool_ops cxgb_ethtool_ops
= {
2071 .get_settings
= get_settings
,
2072 .set_settings
= set_settings
,
2073 .get_drvinfo
= get_drvinfo
,
2074 .get_msglevel
= get_msglevel
,
2075 .set_msglevel
= set_msglevel
,
2076 .get_ringparam
= get_sge_param
,
2077 .set_ringparam
= set_sge_param
,
2078 .get_coalesce
= get_coalesce
,
2079 .set_coalesce
= set_coalesce
,
2080 .get_eeprom_len
= get_eeprom_len
,
2081 .get_eeprom
= get_eeprom
,
2082 .set_eeprom
= set_eeprom
,
2083 .get_pauseparam
= get_pauseparam
,
2084 .set_pauseparam
= set_pauseparam
,
2085 .get_rx_csum
= get_rx_csum
,
2086 .set_rx_csum
= set_rx_csum
,
2087 .set_tx_csum
= ethtool_op_set_tx_csum
,
2088 .set_sg
= ethtool_op_set_sg
,
2089 .get_link
= ethtool_op_get_link
,
2090 .get_strings
= get_strings
,
2091 .phys_id
= cxgb3_phys_id
,
2092 .nway_reset
= restart_autoneg
,
2093 .get_sset_count
= get_sset_count
,
2094 .get_ethtool_stats
= get_stats
,
2095 .get_regs_len
= get_regs_len
,
2096 .get_regs
= get_regs
,
2098 .set_tso
= ethtool_op_set_tso
,
2101 static int in_range(int val
, int lo
, int hi
)
2103 return val
< 0 || (val
<= hi
&& val
>= lo
);
2106 static int cxgb_extension_ioctl(struct net_device
*dev
, void __user
*useraddr
)
2108 struct port_info
*pi
= netdev_priv(dev
);
2109 struct adapter
*adapter
= pi
->adapter
;
2113 if (copy_from_user(&cmd
, useraddr
, sizeof(cmd
)))
2117 case CHELSIO_SET_QSET_PARAMS
:{
2119 struct qset_params
*q
;
2120 struct ch_qset_params t
;
2121 int q1
= pi
->first_qset
;
2122 int nqsets
= pi
->nqsets
;
2124 if (!capable(CAP_NET_ADMIN
))
2126 if (copy_from_user(&t
, useraddr
, sizeof(t
)))
2128 if (t
.qset_idx
>= SGE_QSETS
)
2130 if (!in_range(t
.intr_lat
, 0, M_NEWTIMER
) ||
2131 !in_range(t
.cong_thres
, 0, 255) ||
2132 !in_range(t
.txq_size
[0], MIN_TXQ_ENTRIES
,
2134 !in_range(t
.txq_size
[1], MIN_TXQ_ENTRIES
,
2136 !in_range(t
.txq_size
[2], MIN_CTRL_TXQ_ENTRIES
,
2137 MAX_CTRL_TXQ_ENTRIES
) ||
2138 !in_range(t
.fl_size
[0], MIN_FL_ENTRIES
,
2140 !in_range(t
.fl_size
[1], MIN_FL_ENTRIES
,
2141 MAX_RX_JUMBO_BUFFERS
) ||
2142 !in_range(t
.rspq_size
, MIN_RSPQ_ENTRIES
,
2146 if ((adapter
->flags
& FULL_INIT_DONE
) && t
.lro
> 0)
2147 for_each_port(adapter
, i
) {
2148 pi
= adap2pinfo(adapter
, i
);
2149 if (t
.qset_idx
>= pi
->first_qset
&&
2150 t
.qset_idx
< pi
->first_qset
+ pi
->nqsets
&&
2151 !(pi
->rx_offload
& T3_RX_CSUM
))
2155 if ((adapter
->flags
& FULL_INIT_DONE
) &&
2156 (t
.rspq_size
>= 0 || t
.fl_size
[0] >= 0 ||
2157 t
.fl_size
[1] >= 0 || t
.txq_size
[0] >= 0 ||
2158 t
.txq_size
[1] >= 0 || t
.txq_size
[2] >= 0 ||
2159 t
.polling
>= 0 || t
.cong_thres
>= 0))
2162 /* Allow setting of any available qset when offload enabled */
2163 if (test_bit(OFFLOAD_DEVMAP_BIT
, &adapter
->open_device_map
)) {
2165 for_each_port(adapter
, i
) {
2166 pi
= adap2pinfo(adapter
, i
);
2167 nqsets
+= pi
->first_qset
+ pi
->nqsets
;
2171 if (t
.qset_idx
< q1
)
2173 if (t
.qset_idx
> q1
+ nqsets
- 1)
2176 q
= &adapter
->params
.sge
.qset
[t
.qset_idx
];
2178 if (t
.rspq_size
>= 0)
2179 q
->rspq_size
= t
.rspq_size
;
2180 if (t
.fl_size
[0] >= 0)
2181 q
->fl_size
= t
.fl_size
[0];
2182 if (t
.fl_size
[1] >= 0)
2183 q
->jumbo_size
= t
.fl_size
[1];
2184 if (t
.txq_size
[0] >= 0)
2185 q
->txq_size
[0] = t
.txq_size
[0];
2186 if (t
.txq_size
[1] >= 0)
2187 q
->txq_size
[1] = t
.txq_size
[1];
2188 if (t
.txq_size
[2] >= 0)
2189 q
->txq_size
[2] = t
.txq_size
[2];
2190 if (t
.cong_thres
>= 0)
2191 q
->cong_thres
= t
.cong_thres
;
2192 if (t
.intr_lat
>= 0) {
2193 struct sge_qset
*qs
=
2194 &adapter
->sge
.qs
[t
.qset_idx
];
2196 q
->coalesce_usecs
= t
.intr_lat
;
2197 t3_update_qset_coalesce(qs
, q
);
2199 if (t
.polling
>= 0) {
2200 if (adapter
->flags
& USING_MSIX
)
2201 q
->polling
= t
.polling
;
2203 /* No polling with INTx for T3A */
2204 if (adapter
->params
.rev
== 0 &&
2205 !(adapter
->flags
& USING_MSI
))
2208 for (i
= 0; i
< SGE_QSETS
; i
++) {
2209 q
= &adapter
->params
.sge
.
2211 q
->polling
= t
.polling
;
2216 set_qset_lro(dev
, t
.qset_idx
, t
.lro
);
2220 case CHELSIO_GET_QSET_PARAMS
:{
2221 struct qset_params
*q
;
2222 struct ch_qset_params t
;
2223 int q1
= pi
->first_qset
;
2224 int nqsets
= pi
->nqsets
;
2227 if (copy_from_user(&t
, useraddr
, sizeof(t
)))
2230 /* Display qsets for all ports when offload enabled */
2231 if (test_bit(OFFLOAD_DEVMAP_BIT
, &adapter
->open_device_map
)) {
2233 for_each_port(adapter
, i
) {
2234 pi
= adap2pinfo(adapter
, i
);
2235 nqsets
= pi
->first_qset
+ pi
->nqsets
;
2239 if (t
.qset_idx
>= nqsets
)
2242 q
= &adapter
->params
.sge
.qset
[q1
+ t
.qset_idx
];
2243 t
.rspq_size
= q
->rspq_size
;
2244 t
.txq_size
[0] = q
->txq_size
[0];
2245 t
.txq_size
[1] = q
->txq_size
[1];
2246 t
.txq_size
[2] = q
->txq_size
[2];
2247 t
.fl_size
[0] = q
->fl_size
;
2248 t
.fl_size
[1] = q
->jumbo_size
;
2249 t
.polling
= q
->polling
;
2251 t
.intr_lat
= q
->coalesce_usecs
;
2252 t
.cong_thres
= q
->cong_thres
;
2255 if (adapter
->flags
& USING_MSIX
)
2256 t
.vector
= adapter
->msix_info
[q1
+ t
.qset_idx
+ 1].vec
;
2258 t
.vector
= adapter
->pdev
->irq
;
2260 if (copy_to_user(useraddr
, &t
, sizeof(t
)))
	case CHELSIO_SET_QSET_NUM:{
		struct ch_reg edata;
		unsigned int i, first_qset = 0, other_qsets = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&edata, useraddr, sizeof(edata)))
			return -EFAULT;
		if (edata.val < 1 ||
		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
			return -EINVAL;

		for_each_port(adapter, i)
			if (adapter->port[i] && adapter->port[i] != dev)
				other_qsets += adap2pinfo(adapter, i)->nqsets;

		if (edata.val + other_qsets > SGE_QSETS)
			return -EINVAL;

		pi->nqsets = edata.val;

		for_each_port(adapter, i)
			if (adapter->port[i]) {
				pi = adap2pinfo(adapter, i);
				pi->first_qset = first_qset;
				first_qset += pi->nqsets;
			}
		break;
	}
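	/*
	 * After changing pi->nqsets, the loop above renumbers first_qset for
	 * every port so that each port owns a contiguous block of queue set
	 * indices starting at 0, with no gaps between ports.
	 */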
	case CHELSIO_GET_QSET_NUM:{
		struct ch_reg edata;

		edata.cmd = CHELSIO_GET_QSET_NUM;
		edata.val = pi->nqsets;
		if (copy_to_user(useraddr, &edata, sizeof(edata)))
			return -EFAULT;
		break;
	}
	case CHELSIO_LOAD_FW:{
		u8 *fw_data;
		struct ch_mem_range t;

		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		/* Check t.len sanity ? */
		fw_data = kmalloc(t.len, GFP_KERNEL);
		if (!fw_data)
			return -ENOMEM;

		if (copy_from_user
		    (fw_data, useraddr + sizeof(t), t.len)) {
			kfree(fw_data);
			return -EFAULT;
		}

		ret = t3_load_fw(adapter, fw_data, t.len);
		kfree(fw_data);
		if (ret)
			return ret;
		break;
	}
	case CHELSIO_SETMTUTAB:{
		struct ch_mtus m;
		int i;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (offload_running(adapter))
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (m.nmtus != NMTUS)
			return -EINVAL;
		if (m.mtus[0] < 81)	/* accommodate SACK */
			return -EINVAL;

		/* MTUs must be in ascending order */
		for (i = 1; i < NMTUS; ++i)
			if (m.mtus[i] < m.mtus[i - 1])
				return -EINVAL;

		memcpy(adapter->params.mtus, m.mtus,
		       sizeof(adapter->params.mtus));
		break;
	}
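	/*
	 * Illustrative only: assuming NMTUS is 16, a table accepted by
	 * CHELSIO_SETMTUTAB needs 16 entries, each at least 81 bytes (large
	 * enough for a TCP header carrying a SACK option) and sorted in
	 * ascending order, e.g.
	 * { 88, 256, 512, 576, 808, 1024, 1280, 1492, 1500, 2002, 4096,
	 *   4352, 8192, 9000, 9600, 14336 }.
	 */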
	case CHELSIO_GET_PM:{
		struct tp_params *p = &adapter->params.tp;
		struct ch_pm m = {.cmd = CHELSIO_GET_PM };

		if (!is_offload(adapter))
			return -EOPNOTSUPP;

		m.tx_pg_sz = p->tx_pg_size;
		m.tx_num_pg = p->tx_num_pgs;
		m.rx_pg_sz = p->rx_pg_size;
		m.rx_num_pg = p->rx_num_pgs;
		m.pm_total = p->pmtx_size + p->chan_rx_size * p->nchan;

		if (copy_to_user(useraddr, &m, sizeof(m)))
			return -EFAULT;
		break;
	}
	case CHELSIO_SET_PM:{
		struct ch_pm m;
		struct tp_params *p = &adapter->params.tp;

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (adapter->flags & FULL_INIT_DONE)
			return -EBUSY;
		if (copy_from_user(&m, useraddr, sizeof(m)))
			return -EFAULT;
		if (!is_power_of_2(m.rx_pg_sz) ||
		    !is_power_of_2(m.tx_pg_sz))
			return -EINVAL;	/* not power of 2 */
		if (!(m.rx_pg_sz & 0x14000))
			return -EINVAL;	/* not 16KB or 64KB */
		if (!(m.tx_pg_sz & 0x1554000))
			return -EINVAL;
		if (m.tx_num_pg == -1)
			m.tx_num_pg = p->tx_num_pgs;
		if (m.rx_num_pg == -1)
			m.rx_num_pg = p->rx_num_pgs;
		if (m.tx_num_pg % 24 || m.rx_num_pg % 24)
			return -EINVAL;
		if (m.rx_num_pg * m.rx_pg_sz > p->chan_rx_size ||
		    m.tx_num_pg * m.tx_pg_sz > p->chan_tx_size)
			return -EINVAL;

		p->rx_pg_size = m.rx_pg_sz;
		p->tx_pg_size = m.tx_pg_sz;
		p->rx_num_pgs = m.rx_num_pg;
		p->tx_num_pgs = m.tx_num_pg;
		break;
	}
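	/*
	 * The page-size masks checked above encode the page sizes the
	 * hardware accepts: 0x14000 sets bits 14 and 16, i.e. 16KB or 64KB
	 * for the Rx page size, and 0x1554000 sets every other bit from 14
	 * to 24, i.e. 16KB, 64KB, 256KB, 1MB, 4MB or 16MB for the Tx page
	 * size.  A value passes only if it is a power of 2 whose single set
	 * bit falls inside the mask.
	 */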
	case CHELSIO_GET_MEM:{
		struct ch_mem_range t;
		struct mc7 *mem;
		u64 buf[32];

		if (!is_offload(adapter))
			return -EOPNOTSUPP;
		if (!(adapter->flags & FULL_INIT_DONE))
			return -EIO;	/* need the memory controllers */
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;
		if ((t.addr & 7) || (t.len & 7))
			return -EINVAL;
		if (t.mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t.mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t.mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		/*
		 * Version scheme:
		 * bits 0..9: chip version
		 * bits 10..15: chip revision
		 */
		t.version = 3 | (adapter->params.rev << 10);
		if (copy_to_user(useraddr, &t, sizeof(t)))
			return -EFAULT;

		/*
		 * Read 256 bytes at a time as len can be large and we don't
		 * want to use huge intermediate buffers.
		 */
		useraddr += sizeof(t);	/* advance to start of buffer */
		while (t.len) {
			unsigned int chunk =
				min_t(unsigned int, t.len, sizeof(buf));

			ret = t3_mc7_bd_read(mem, t.addr / 8, chunk / 8, buf);
			if (ret)
				return ret;
			if (copy_to_user(useraddr, buf, chunk))
				return -EFAULT;
			useraddr += chunk;
			t.addr += chunk;
			t.len -= chunk;
		}
		break;
	}
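	/*
	 * The version word returned above packs the chip family in bits 0..9
	 * (always 3 for T3 parts) and params.rev in bits 10..15, so userspace
	 * can recover the two fields with (version & 0x3ff) and
	 * ((version >> 10) & 0x3f).
	 */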
	case CHELSIO_SET_TRACE_FILTER:{
		struct ch_trace t;
		const struct trace_params *tp;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!offload_running(adapter))
			return -EAGAIN;
		if (copy_from_user(&t, useraddr, sizeof(t)))
			return -EFAULT;

		tp = (const struct trace_params *)&t.sip;
		if (t.config_tx)
			t3_config_trace_filter(adapter, tp, 0,
					       t.invert_match, t.trace_tx);
		if (t.config_rx)
			t3_config_trace_filter(adapter, tp, 1,
					       t.invert_match, t.trace_rx);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct mii_ioctl_data *data = if_mii(req);
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	switch (cmd) {
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		/* Convert phy_id from older PRTAD/DEVAD format */
		if (is_10G(adapter) &&
		    !mdio_phy_id_is_c45(data->phy_id) &&
		    (data->phy_id & 0x1f00) &&
		    !(data->phy_id & 0xe0e0))
			data->phy_id = mdio_phy_id_c45(data->phy_id >> 8,
						       data->phy_id & 0x1f);
		/* FALLTHRU */
	case SIOCGMIIPHY:
		return mdio_mii_ioctl(&pi->phy.mdio, data, cmd);
	case SIOCCHIOCTL:
		return cxgb_extension_ioctl(dev, req->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	if (new_mtu < 81)	/* accommodate SACK */
		return -EINVAL;
	if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
		return ret;
	dev->mtu = new_mtu;
	init_port_mtus(adapter);
	if (adapter->params.rev == 0 && offload_running(adapter))
		t3_load_mtus(adapter, adapter->params.mtus,
			     adapter->params.a_wnd, adapter->params.b_wnd,
			     adapter->port[0]->mtu);
	return 0;
}
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
	if (offload_running(adapter))
		write_smt_entry(adapter, pi->port_id);
	return 0;
}
/**
 *	t3_synchronize_rx - wait for current Rx processing on a port to complete
 *	@adap: the adapter
 *	@p: the port
 *
 *	Ensures that current Rx processing on any of the queues associated with
 *	the given port completes before returning.  We do this by acquiring and
 *	releasing the locks of the response queues associated with the port.
 */
static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
{
	int i;

	for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) {
		struct sge_rspq *q = &adap->sge.qs[i].rspq;

		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
	}
}
static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	pi->vlan_grp = grp;
	if (adapter->params.rev > 0)
		t3_set_vlan_accel(adapter, 1 << pi->port_id, grp != NULL);
	else {
		/* single control for all ports */
		unsigned int i, have_vlans = 0;

		for_each_port(adapter, i)
			have_vlans |= adap2pinfo(adapter, i)->vlan_grp != NULL;

		t3_set_vlan_accel(adapter, 1, have_vlans);
	}
	t3_synchronize_rx(adapter, pi);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int qidx;

	for (qidx = pi->first_qset; qidx < pi->first_qset + pi->nqsets; qidx++) {
		struct sge_qset *qs = &adapter->sge.qs[qidx];
		void *source;

		if (adapter->flags & USING_MSIX)
			source = qs;
		else
			source = adapter;

		t3_intr_handler(adapter, qs->rspq.polling) (0, source);
	}
}
#endif
/*
 * Periodic accumulation of MAC statistics.
 */
static void mac_stats_update(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		if (netif_running(dev)) {
			spin_lock(&adapter->stats_lock);
			t3_mac_update_stats(&p->mac);
			spin_unlock(&adapter->stats_lock);
		}
	}
}
static void check_link_status(struct adapter *adapter)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int link_fault;

		spin_lock_irq(&adapter->work_lock);
		link_fault = p->link_fault;
		spin_unlock_irq(&adapter->work_lock);

		if (link_fault) {
			t3_link_fault(adapter, i);
			continue;
		}

		if (!(p->phy.caps & SUPPORTED_IRQ) && netif_running(dev)) {
			t3_xgm_intr_disable(adapter, i);
			t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);

			t3_link_changed(adapter, i);
			t3_xgm_intr_enable(adapter, i);
		}
	}
}
static void check_t3b2_mac(struct adapter *adapter)
{
	int i;

	if (!rtnl_trylock())	/* synchronize with ifdown */
		return;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);
		int status = 0;

		if (!netif_running(dev))
			continue;

		if (netif_running(dev) && netif_carrier_ok(dev))
			status = t3b2_mac_watchdog_task(&p->mac);
		if (status == 1)
			p->mac.stats.num_toggled++;
		else if (status == 2) {
			struct cmac *mac = &p->mac;

			t3_mac_set_mtu(mac, dev->mtu);
			t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
			cxgb_set_rxmode(dev);
			t3_link_start(&p->phy, mac, &p->link_config);
			t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
			t3_port_intr_enable(adapter, p->port_id);
			p->mac.stats.num_resets++;
		}
	}
	rtnl_unlock();
}
static void t3_adap_check_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       adap_check_task.work);
	const struct adapter_params *p = &adapter->params;
	int port;
	unsigned int v, status, reset;

	adapter->check_task_cnt++;

	check_link_status(adapter);

	/* Accumulate MAC stats if needed */
	if (!p->linkpoll_period ||
	    (adapter->check_task_cnt * p->linkpoll_period) / 10 >=
	    p->stats_update_period) {
		mac_stats_update(adapter);
		adapter->check_task_cnt = 0;
	}

	if (p->rev == T3_REV_B2)
		check_t3b2_mac(adapter);

	/*
	 * Scan the XGMAC's to check for various conditions which we want to
	 * monitor in a periodic polling manner rather than via an interrupt
	 * condition.  This is used for conditions which would otherwise flood
	 * the system with interrupts and we only really need to know that the
	 * conditions are "happening" ...  For each condition we count the
	 * detection of the condition and reset it for the next polling loop.
	 */
	for_each_port(adapter, port) {
		struct cmac *mac = &adap2pinfo(adapter, port)->mac;
		u32 cause;

		cause = t3_read_reg(adapter, A_XGM_INT_CAUSE + mac->offset);
		reset = 0;
		if (cause & F_RXFIFO_OVERFLOW) {
			mac->stats.rx_fifo_ovfl++;
			reset |= F_RXFIFO_OVERFLOW;
		}

		t3_write_reg(adapter, A_XGM_INT_CAUSE + mac->offset, reset);
	}

	/*
	 * We do the same as above for FL_EMPTY interrupts.
	 */
	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	reset = 0;

	if (status & F_FLEMPTY) {
		struct sge_qset *qs = &adapter->sge.qs[0];
		int i = 0;

		reset |= F_FLEMPTY;

		v = (t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS) >> S_FL0EMPTY) &
		    0xffff;

		while (v) {
			qs->fl[i].empty += (v & 1);
			if (i)
				qs++;
			i ^= 1;
			v >>= 1;
		}
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, reset);

	/* Schedule the next check update if any port is active. */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->open_device_map & PORT_MASK)
		schedule_chk_task(adapter);
	spin_unlock_irq(&adapter->work_lock);
}
static void db_full_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_full_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
}

static void db_empty_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_empty_task);

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
}

static void db_drop_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       db_drop_task);
	unsigned long delay = 1000;
	unsigned short r;

	cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);

	/*
	 * Sleep a while before ringing the driver qset dbs.
	 * The delay is between 1000-2023 usecs.
	 */
	get_random_bytes(&r, 2);
	delay += (r & 1023);
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(delay));
	ring_dbs(adapter);
}
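/*
 * The 16-bit random value masked with 1023 above adds 0-1023 usecs on top of
 * the base delay of 1000, which is where the 1000-2023 usec window quoted in
 * the comment comes from.
 */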
/*
 * Processes external (PHY) interrupts in process context.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       ext_intr_handler_task);
	int i;

	/* Disable link fault interrupts */
	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];
		struct port_info *p = netdev_priv(dev);

		t3_xgm_intr_disable(adapter, i);
		t3_read_reg(adapter, A_XGM_INT_STATUS + p->mac.offset);
	}

	/* Re-enable link fault interrupts */
	t3_phy_intr_handler(adapter);

	for_each_port(adapter, i)
		t3_xgm_intr_enable(adapter, i);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask |= F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_CAUSE0, F_T3DBG);
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
	}
	spin_unlock_irq(&adapter->work_lock);
}
/*
 * Interrupt-context handler for external (PHY) interrupts.
 */
void t3_os_ext_intr_handler(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as they may be slow
	 * and we use a mutex to protect MDIO registers.  We disable PHY
	 * interrupts in the meantime and let the task reenable them when
	 * it's done.
	 */
	spin_lock(&adapter->work_lock);
	if (adapter->slow_intr_mask) {
		adapter->slow_intr_mask &= ~F_T3DBG;
		t3_write_reg(adapter, A_PL_INT_ENABLE0,
			     adapter->slow_intr_mask);
		queue_work(cxgb3_wq, &adapter->ext_intr_handler_task);
	}
	spin_unlock(&adapter->work_lock);
}
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
	struct net_device *netdev = adapter->port[port_id];
	struct port_info *pi = netdev_priv(netdev);

	spin_lock(&adapter->work_lock);
	pi->link_fault = 1;
	spin_unlock(&adapter->work_lock);
}
static int t3_adapter_error(struct adapter *adapter, int reset)
{
	int i, ret = 0;

	if (is_offload(adapter) &&
	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
		offload_close(&adapter->tdev);
	}

	/* Stop all ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev))
			cxgb_close(netdev);
	}

	/* Stop SGE timers */
	t3_stop_sge_timers(adapter);

	adapter->flags &= ~FULL_INIT_DONE;

	if (reset)
		ret = t3_reset_adapter(adapter);

	pci_disable_device(adapter->pdev);

	return ret;
}
static int t3_reenable_adapter(struct adapter *adapter)
{
	if (pci_enable_device(adapter->pdev)) {
		dev_err(&adapter->pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		goto err;
	}
	pci_set_master(adapter->pdev);
	pci_restore_state(adapter->pdev);
	pci_save_state(adapter->pdev);

	/* Free sge resources */
	t3_free_sge_resources(adapter);

	if (t3_replay_prep_adapter(adapter))
		goto err;

	return 0;
err:
	return -1;
}
static void t3_resume_ports(struct adapter *adapter)
{
	int i;

	/* Restart the ports */
	for_each_port(adapter, i) {
		struct net_device *netdev = adapter->port[i];

		if (netif_running(netdev)) {
			if (cxgb_open(netdev)) {
				dev_err(&adapter->pdev->dev,
					"can't bring device back up"
					" after reset\n");
				continue;
			}
		}
	}

	if (is_offload(adapter) && !ofld_disable)
		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
}
/*
 * Processes a fatal error.
 * Bring the ports down, reset the chip, bring the ports back up.
 */
static void fatal_error_task(struct work_struct *work)
{
	struct adapter *adapter = container_of(work, struct adapter,
					       fatal_error_handler_task);
	int err = 0;

	rtnl_lock();
	err = t3_adapter_error(adapter, 1);
	if (!err)
		err = t3_reenable_adapter(adapter);
	if (!err)
		t3_resume_ports(adapter);

	CH_ALERT(adapter, "adapter reset %s\n", err ? "failed" : "succeeded");
	rtnl_unlock();
}
void t3_fatal_err(struct adapter *adapter)
{
	unsigned int fw_status[4];

	if (adapter->flags & FULL_INIT_DONE) {
		t3_sge_stop(adapter);
		t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
		t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
		t3_write_reg(adapter, XGM_REG(A_XGM_RX_CTRL, 1), 0);

		spin_lock(&adapter->work_lock);
		t3_intr_disable(adapter);
		queue_work(cxgb3_wq, &adapter->fatal_error_handler_task);
		spin_unlock(&adapter->work_lock);
	}
	CH_ALERT(adapter, "encountered fatal error, operation suspended\n");
	if (!t3_cim_ctl_blk_read(adapter, 0xa0, 4, fw_status))
		CH_ALERT(adapter, "FW status: 0x%x, 0x%x, 0x%x, 0x%x\n",
			 fw_status[0], fw_status[1],
			 fw_status[2], fw_status[3]);
}
/**
 * t3_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	struct adapter *adapter = pci_get_drvdata(pdev);
	int ret;

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	ret = t3_adapter_error(adapter, 0);

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * t3_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t t3_io_slot_reset(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!t3_reenable_adapter(adapter))
		return PCI_ERS_RESULT_RECOVERED;

	return PCI_ERS_RESULT_DISCONNECT;
}
/**
 * t3_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void t3_io_resume(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
		 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	t3_resume_ports(adapter);
}
static struct pci_error_handlers t3_err_handler = {
	.error_detected = t3_io_error_detected,
	.slot_reset = t3_io_slot_reset,
	.resume = t3_io_resume,
};
/*
 * Set the number of qsets based on the number of CPUs and the number of ports,
 * not to exceed the number of available qsets, assuming there are enough qsets
 * per port in hardware.
 */
static void set_nqsets(struct adapter *adap)
{
	int i, j = 0;
	int num_cpus = num_online_cpus();
	int hwports = adap->params.nports;
	int nqsets = adap->msix_nvectors - 1;

	if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
		if (hwports == 2 &&
		    (hwports * nqsets > SGE_QSETS ||
		     num_cpus >= nqsets / hwports))
			nqsets /= hwports;
		if (nqsets > num_cpus)
			nqsets = num_cpus;
		if (nqsets < 1 || hwports == 4)
			nqsets = 1;
	} else
		nqsets = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = j;
		pi->nqsets = nqsets;
		j = pi->first_qset + nqsets;

		dev_info(&adap->pdev->dev,
			 "Port %d using %d queue sets.\n", i, nqsets);
	}
}
static int __devinit cxgb_enable_msix(struct adapter *adap)
{
	struct msix_entry entries[SGE_QSETS + 1];
	int vectors;
	int i, err;

	vectors = ARRAY_SIZE(entries);
	for (i = 0; i < vectors; ++i)
		entries[i].entry = i;

	while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
		vectors = err;

	if (err < 0)
		pci_disable_msix(adap->pdev);

	if (!err && vectors < (adap->params.nports + 1)) {
		pci_disable_msix(adap->pdev);
		err = -1;
	}

	if (!err) {
		for (i = 0; i < vectors; ++i)
			adap->msix_info[i].vec = entries[i].vector;
		adap->msix_nvectors = vectors;
	}

	return err;
}
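/*
 * pci_enable_msix() returns 0 on success, a negative errno on failure, or,
 * when the full request cannot be satisfied, a positive count of vectors
 * that could be allocated; the loop above retries with that smaller count
 * until the call either succeeds or fails outright.
 */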
static void __devinit print_port_info(struct adapter *adap,
				      const struct adapter_info *ai)
{
	static const char *pci_variant[] = {
		"PCI", "PCI-X", "PCI-X ECC", "PCI-X 266", "PCI Express"
	};

	int i;
	char buf[80];

	if (is_pcie(adap))
		snprintf(buf, sizeof(buf), "%s x%d",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.width);
	else
		snprintf(buf, sizeof(buf), "%s %dMHz/%d-bit",
			 pci_variant[adap->params.pci.variant],
			 adap->params.pci.speed, adap->params.pci.width);

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		const struct port_info *pi = netdev_priv(dev);

		if (!test_bit(i, &adap->registered_device_map))
			continue;
		printk(KERN_INFO "%s: %s %s %sNIC (rev %d) %s%s\n",
		       dev->name, ai->desc, pi->phy.desc,
		       is_offload(adap) ? "R" : "", adap->params.rev, buf,
		       (adap->flags & USING_MSIX) ? " MSI-X" :
		       (adap->flags & USING_MSI) ? " MSI" : "");
		if (adap->name == dev->name && adap->params.vpd.mclk)
			printk(KERN_INFO
			       "%s: %uMB CM, %uMB PMTX, %uMB PMRX, S/N: %s\n",
			       adap->name, t3_mc7_size(&adap->cm) >> 20,
			       t3_mc7_size(&adap->pmtx) >> 20,
			       t3_mc7_size(&adap->pmrx) >> 20,
			       adap->params.vpd.sn);
	}
}
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open		= cxgb_open,
	.ndo_stop		= cxgb_close,
	.ndo_start_xmit		= t3_eth_xmit,
	.ndo_get_stats		= cxgb_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= cxgb_set_rxmode,
	.ndo_do_ioctl		= cxgb_ioctl,
	.ndo_change_mtu		= cxgb_change_mtu,
	.ndo_set_mac_address	= cxgb_set_mac_addr,
	.ndo_vlan_rx_register	= vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= cxgb_netpoll,
#endif
};
static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
	pi->iscsic.mac_addr[3] |= 0x80;
}
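/*
 * The iSCSI MAC is derived from the port's LAN MAC by setting bit 7 of the
 * fourth address byte, e.g. a LAN address of 00:07:43:12:34:56 yields an
 * iSCSI address of 00:07:43:92:34:56, keeping the two addresses distinct.
 */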
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	static int version_printed;

	int i, err, pci_using_dac = 0;
	resource_size_t mmio_start, mmio_len;
	const struct adapter_info *ai;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	if (!version_printed) {
		printk(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
		++version_printed;
	}

	if (!cxgb3_wq) {
		cxgb3_wq = create_singlethread_workqueue(DRV_NAME);
		if (!cxgb3_wq) {
			printk(KERN_ERR DRV_NAME
			       ": cannot initialize work queue\n");
			return -ENOMEM;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	ai = t3_get_adapter_info(ent->driver_data);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->nofail_skb =
		alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_KERNEL);
	if (!adapter->nofail_skb) {
		dev_err(&pdev->dev, "cannot allocate nofail buffer\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->regs = ioremap_nocache(mmio_start, mmio_len);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->name = pci_name(pdev);
	adapter->msg_enable = dflt_msg_enable;
	adapter->mmio_len = mmio_len;

	mutex_init(&adapter->mdio_lock);
	spin_lock_init(&adapter->work_lock);
	spin_lock_init(&adapter->stats_lock);

	INIT_LIST_HEAD(&adapter->adapter_list);
	INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
	INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);

	INIT_WORK(&adapter->db_full_task, db_full_task);
	INIT_WORK(&adapter->db_empty_task, db_empty_task);
	INIT_WORK(&adapter->db_drop_task, db_drop_task);

	INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);

	for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->rx_offload = T3_RX_CSUM | T3_LRO;
		pi->port_id = i;
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev->irq = pdev->irq;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
		netdev->features |= NETIF_F_GRO;
		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;

		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		netdev->netdev_ops = &cxgb_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);
	if (t3_prep_adapter(adapter, ai, 1) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			dev_warn(&pdev->dev,
				 "cannot register net device %s, skipping\n",
				 adapter->port[i]->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i]->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		err = -ENODEV;
		goto out_free_dev;
	}

	for_each_port(adapter, i)
		cxgb3_init_iscsi_mac(adapter->port[i]);

	/* Driver's ready. Reflect it on LEDs */
	t3_led_ready(adapter);

	if (is_offload(adapter)) {
		__set_bit(OFFLOAD_DEVMAP_BIT, &adapter->registered_device_map);
		cxgb3_adapter_ofld(adapter);
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && cxgb_enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	set_nqsets(adapter);

	err = sysfs_create_group(&adapter->port[0]->dev.kobj,
				 &cxgb3_attr_group);

	print_port_info(adapter, ai);
	return 0;

out_free_dev:
	iounmap(adapter->regs);
	for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
		if (adapter->port[i])
			free_netdev(adapter->port[i]);

out_free_adapter:
	kfree(adapter);

out_disable_device:
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (adapter) {
		int i;

		t3_sge_stop(adapter);
		sysfs_remove_group(&adapter->port[0]->dev.kobj,
				   &cxgb3_attr_group);

		if (is_offload(adapter)) {
			cxgb3_adapter_unofld(adapter);
			if (test_bit(OFFLOAD_DEVMAP_BIT,
				     &adapter->open_device_map))
				offload_close(&adapter->tdev);
		}

		for_each_port(adapter, i)
			if (test_bit(i, &adapter->registered_device_map))
				unregister_netdev(adapter->port[i]);

		t3_stop_sge_timers(adapter);
		t3_free_sge_resources(adapter);
		cxgb_disable_msi(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i])
				free_netdev(adapter->port[i]);

		iounmap(adapter->regs);
		if (adapter->nofail_skb)
			kfree_skb(adapter->nofail_skb);
		kfree(adapter);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
static struct pci_driver driver = {
	.name = DRV_NAME,
	.id_table = cxgb3_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &t3_err_handler,
};
static int __init cxgb3_init_module(void)
{
	int ret;

	cxgb3_offload_init();

	ret = pci_register_driver(&driver);
	return ret;
}

static void __exit cxgb3_cleanup_module(void)
{
	pci_unregister_driver(&driver);
	if (cxgb3_wq)
		destroy_workqueue(cxgb3_wq);
}

module_init(cxgb3_init_module);
module_exit(cxgb3_cleanup_module);