/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include <../drivers/net/bonding/bonding.h>

#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * free memory.
 */
#define MAX_SGE_TIMERVAL 200U
/*
 * Physical Function provisioning constants.
 */
enum {
	PFRES_NVI = 4,			/* # of Virtual Interfaces */
	PFRES_NETHCTRL = 128,		/* # of EQs used for ETH or CTRL Qs */
	PFRES_NIQFLINT = 128,		/* # of ingress Qs/w Free List(s)/intr */
	PFRES_NEQ = 256,		/* # of egress queues */
	PFRES_NIQ = 0,			/* # of ingress queues */
	PFRES_TC = 0,			/* PCI-E traffic class */
	PFRES_NEXACTF = 128,		/* # of exact MPS filters */

	PFRES_R_CAPS = FW_CMD_CAP_PF,
	PFRES_WX_CAPS = FW_CMD_CAP_PF,
};
#ifdef CONFIG_PCI_IOV
/*
 * Virtual Function provisioning constants.  We need two extra Ingress
 * Queues with Interrupt capability to serve as the VF's Firmware
 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
 * neither will have Free Lists associated with them.  For each
 * Ethernet/Control Egress Queue and for each Free List, we need an
 * Egress Context.
 */
enum {
	VFRES_NPORTS = 1,		/* # of "ports" per VF */
	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */

	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
	VFRES_TC = 0,			/* PCI-E traffic class */
	VFRES_NEXACTF = 16,		/* # of exact MPS filters */

	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
};
/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
				  unsigned int pf, unsigned int vf)
{
	unsigned int portn, portvec;

	/*
	 * Give PF's access to all of the ports.
	 */
	if (vf == 0)
		return FW_PFVF_CMD_PMASK_MASK;

	/*
	 * For VFs, we'll assign them access to the ports based purely on the
	 * PF.  We assign active ports in order, wrapping around if there are
	 * fewer active ports than PFs: e.g. active port[pf % nports].
	 * Unfortunately the adapter's port_info structs haven't been
	 * initialized yet so we have to compute this.
	 */
	if (adapter->params.nports == 0)
		return 0;

	portn = pf % adapter->params.nports;
	portvec = adapter->params.portvec;

	/*
	 * Isolate the lowest set bit in the port vector.  If we're at
	 * the port number that we want, return that as the pmask.
	 * Otherwise mask that bit out of the port vector and
	 * decrement our port number ...
	 */
	unsigned int pmask = portvec ^ (portvec & (portvec-1));
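	/*
	 * A quick illustration of the expression above: portvec & (portvec - 1)
	 * clears the lowest set bit, so XOR-ing the result back with portvec
	 * isolates that bit.  E.g. portvec = 0b0110 yields pmask = 0b0010.
	 */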
enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
};
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
	CH_DEVICE(0xa000, 0),  /* PE10K */
	CH_DEVICE(0x4001, -1),
	CH_DEVICE(0x4002, -1),
	CH_DEVICE(0x4003, -1),
	CH_DEVICE(0x4004, -1),
	CH_DEVICE(0x4005, -1),
	CH_DEVICE(0x4006, -1),
	CH_DEVICE(0x4007, -1),
	CH_DEVICE(0x4008, -1),
	CH_DEVICE(0x4009, -1),
	CH_DEVICE(0x400a, -1),
	CH_DEVICE(0x4401, 4),
	CH_DEVICE(0x4402, 4),
	CH_DEVICE(0x4403, 4),
	CH_DEVICE(0x4404, 4),
	CH_DEVICE(0x4405, 4),
	CH_DEVICE(0x4406, 4),
	CH_DEVICE(0x4407, 4),
	CH_DEVICE(0x4408, 4),
	CH_DEVICE(0x4409, 4),
	CH_DEVICE(0x440a, 4),
	CH_DEVICE(0x440d, 4),
	CH_DEVICE(0x440e, 4),
	CH_DEVICE(0x5001, 4),
	CH_DEVICE(0x5002, 4),
	CH_DEVICE(0x5003, 4),
	CH_DEVICE(0x5004, 4),
	CH_DEVICE(0x5005, 4),
	CH_DEVICE(0x5006, 4),
	CH_DEVICE(0x5007, 4),
	CH_DEVICE(0x5008, 4),
	CH_DEVICE(0x5009, 4),
	CH_DEVICE(0x500A, 4),
	CH_DEVICE(0x500B, 4),
	CH_DEVICE(0x500C, 4),
	CH_DEVICE(0x500D, 4),
	CH_DEVICE(0x500E, 4),
	CH_DEVICE(0x500F, 4),
	CH_DEVICE(0x5010, 4),
	CH_DEVICE(0x5011, 4),
	CH_DEVICE(0x5012, 4),
	CH_DEVICE(0x5013, 4),
	CH_DEVICE(0x5401, 4),
	CH_DEVICE(0x5402, 4),
	CH_DEVICE(0x5403, 4),
	CH_DEVICE(0x5404, 4),
	CH_DEVICE(0x5405, 4),
	CH_DEVICE(0x5406, 4),
	CH_DEVICE(0x5407, 4),
	CH_DEVICE(0x5408, 4),
	CH_DEVICE(0x5409, 4),
	CH_DEVICE(0x540A, 4),
	CH_DEVICE(0x540B, 4),
	CH_DEVICE(0x540C, 4),
	CH_DEVICE(0x540D, 4),
	CH_DEVICE(0x540E, 4),
	CH_DEVICE(0x540F, 4),
	CH_DEVICE(0x5410, 4),
	CH_DEVICE(0x5411, 4),
	CH_DEVICE(0x5412, 4),
	CH_DEVICE(0x5413, 4),
};
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters");
/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *   [Inner] VLAN (17), Port (3), FCoE (1) }
 */
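/*
 * Worked bit budget for the default selection above:
 * 1 + 3 + 8 + 17 + 3 + 1 = 33 bits, which fits within the 36-bit
 * "compressed" header budget with three bits to spare.
 */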
enum {
	TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
	TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
	TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else
			netif_carrier_off(dev);
	}
}
void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}
/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");
/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

static struct workqueue_struct *workq;
/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0)
		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
	return ret;
}
/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter have loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	    (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = GET_TCB_COOKIE(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}
/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

		if (p->type == 0)
			t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}
/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}
/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;

	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
	t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}
/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}
static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}
/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}
/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}
/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}
/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
			     SEINTARM(q->intr_params) |
			     INGRESSQID(q->cntxt_id));
	}
}
/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		struct sge_ofld_rxq *q = &s->ofldrxq[i];
		struct net_device *dev = adap->port[i / j];

		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
				       &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->ofld_rxq[i] = q->rspq.abs_id;
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	for_each_rdmarxq(s, i) {
		struct sge_ofld_rxq *q = &s->rdmarxq[i];

		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
				       msi_idx, &q->fl, uldrx_handler);
		if (err)
			goto freeout;
		memset(&q->stats, 0, sizeof(q->stats));
		s->rdma_rxq[i] = q->rspq.abs_id;
	}

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
	return 0;
}
/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					 f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(V_FW_FILTER_WR_TID(ftid) |
		      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
		      V_FW_FILTER_WR_NOREPLY(0) |
		      V_FW_FILTER_WR_IQ(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
		      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
		      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
		      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
		      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
		      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
		      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
		      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
		      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
		      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
		      V_FW_FILTER_WR_PRIO(f->fs.prio) |
		      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
		 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
		 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
		 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
		 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
	fwr->rx_chan_rx_rpl_iq =
		htons(V_FW_FILTER_WR_RX_CHAN(0) |
		      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
		      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
		      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
		      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
		      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
		      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
		      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
		      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}
static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",
	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",
	"WriteCoalSuccess   ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->params.chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}
static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};
static void collect_sge_port_stats(const struct adapter *adap,
				   const struct port_info *p,
				   struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->params.chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
		*data = val1 - val2;
	} else
		memset(data, 0, 2 * sizeof(u64));
}
/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
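/*
 * Illustrative sketch (assuming CHELSIO_CHIP_VERSION() returns 5 and
 * CHELSIO_CHIP_RELEASE() returns 0 for a T5 rev-0 part): mk_adap_vers()
 * would then yield 0x5 | (0 << 10) | (1 << 16) = 0x10005.
 */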
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int t4_reg_ranges[] = {
		/* T4 register address ranges (table not reproduced here) */
	};

	static const unsigned int t5_reg_ranges[] = {
		/* T5 register address ranges (table not reproduced here) */
	};

	int i;
	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0;

	if (is_t4(ap->params.chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
	} else {
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;
	}

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}
static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}
static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	return v;
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_FIBRE;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	return 0;
}
static unsigned int speed_to_caps(int speed)
{
	if (speed == SPEED_100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == SPEED_1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == SPEED_10000)
		return FW_PORT_CAP_SPEED_10G;
	return 0;
}
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
		    (speed == SPEED_10000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}
static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}
static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}
/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
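/*
 * Note on the encoding used above: q->intr_params keeps the hold-off timer
 * index in the bits above bit 0 (hence the ">> 1" in qtimer_val()) and uses
 * bit 0 (QINTR_CNT_EN) to flag that the packet-count threshold is also armed.
 */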
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	int i, r = 0;
	struct sge_rspq *q;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
		q = &adap->sge.ethrxq[i].rspq;
		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
					c->rx_max_coalesced_frames);
		if (r) {
			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
			break;
		}
	}
	return r;
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}
/**
 *	eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [31K-A..31K)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
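/*
 * Worked example of the mapping above (with A = @fn * @sz already folded
 * into fn): physical address 0 maps to virtual 31744 (31K), physical 1023
 * maps to 32767, and the first byte past the function-specific area,
 * physical 1024 + A, maps to virtual 0.
 */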
/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}
#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
*netdev
, struct ethtool_flash
*ef
)
2639 const struct firmware
*fw
;
2640 struct adapter
*adap
= netdev2adap(netdev
);
2642 ef
->data
[sizeof(ef
->data
) - 1] = '\0';
2643 ret
= request_firmware(&fw
, ef
->data
, adap
->pdev_dev
);
2647 ret
= t4_load_fw(adap
, fw
->data
, fw
->size
);
2648 release_firmware(fw
);
2650 dev_info(adap
->pdev_dev
, "loaded firmware %s\n", ef
->data
);
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}
static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}
static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh_indir    = get_rss_table,
	.set_rxfh_indir    = set_rss_table,
	.flash_device      = set_flash,
};
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file_inode(file)->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if ((mem == MEM_MC) || (mem == MEM_MC1))
			ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mem, pos, data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations mem_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = simple_open,
	.read    = mem_read,
	.llseek  = default_llseek,
};
static void add_debugfs_mem(struct adapter *adap, const char *name,
			    unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
				 (void *)adap + idx, &mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}
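/*
 * Note the trick above: the memory index (MEM_EDC0, MEM_EDC1, MEM_MC, ...)
 * is folded into the low bits of the adapter pointer stored as the debugfs
 * file's private_data.  mem_read() recovers both pieces the same way:
 *
 *	unsigned int mem = (uintptr_t)file->private_data & 3;
 *	struct adapter *adap = file->private_data - mem;
 *
 * This only works because struct adapter is at least 4-byte aligned, so the
 * two low bits of the pointer are known to be zero.
 */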
static int setup_debugfs(struct adapter *adap)
{
	int i;
	u32 size;

	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE) {
		size = t4_read_reg(adap, MA_EDRAM0_BAR);
		add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
	}
	if (i & EDRAM1_ENABLE) {
		size = t4_read_reg(adap, MA_EDRAM1_BAR);
		add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
	}
	if (is_t4(adap->params.chip)) {
		size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
		if (i & EXT_MEM_ENABLE)
			add_debugfs_mem(adap, "mc", MEM_MC,
					EXT_MEM_SIZE_GET(size));
	} else {
		if (i & EXT_MEM_ENABLE) {
			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
			add_debugfs_mem(adap, "mc0", MEM_MC0,
					EXT_MEM_SIZE_GET(size));
		}
		if (i & EXT_MEM1_ENABLE) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
			add_debugfs_mem(adap, "mc1", MEM_MC1,
					EXT_MEM_SIZE_GET(size));
		}
	}
	if (adap->l2t)
		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
				    &t4_l2t_fops);
	return 0;
}
/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
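/*
 * A minimal sketch of the active-open TID lifecycle as seen from an upper
 * layer driver (illustrative; "my_conn" is a hypothetical ULD connection
 * object, not part of this driver):
 *
 *	int atid = cxgb4_alloc_atid(&adap->tids, my_conn);
 *	if (atid < 0)
 *		return -ENOMEM;            // table exhausted
 *	... use atid in the CPL active-open request ...
 *	cxgb4_free_atid(&adap->tids, atid);
 */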
/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
2968 * Allocate a server TID and set it to the supplied value.
2970 int cxgb4_alloc_stid(struct tid_info
*t
, int family
, void *data
)
2974 spin_lock_bh(&t
->stid_lock
);
2975 if (family
== PF_INET
) {
2976 stid
= find_first_zero_bit(t
->stid_bmap
, t
->nstids
);
2977 if (stid
< t
->nstids
)
2978 __set_bit(stid
, t
->stid_bmap
);
2982 stid
= bitmap_find_free_region(t
->stid_bmap
, t
->nstids
, 2);
2987 t
->stid_tab
[stid
].data
= data
;
2988 stid
+= t
->stid_base
;
2989 /* IPv6 requires max of 520 bits or 16 cells in TCAM
2990 * This is equivalent to 4 TIDs. With CLIP enabled it
2993 if (family
== PF_INET
)
2996 t
->stids_in_use
+= 4;
2998 spin_unlock_bh(&t
->stid_lock
);
3001 EXPORT_SYMBOL(cxgb4_alloc_stid
);
3003 /* Allocate a server filter TID and set it to the supplied value.
3005 int cxgb4_alloc_sftid(struct tid_info
*t
, int family
, void *data
)
3009 spin_lock_bh(&t
->stid_lock
);
3010 if (family
== PF_INET
) {
3011 stid
= find_next_zero_bit(t
->stid_bmap
,
3012 t
->nstids
+ t
->nsftids
, t
->nstids
);
3013 if (stid
< (t
->nstids
+ t
->nsftids
))
3014 __set_bit(stid
, t
->stid_bmap
);
3021 t
->stid_tab
[stid
].data
= data
;
3023 stid
+= t
->sftid_base
;
3026 spin_unlock_bh(&t
->stid_lock
);
3029 EXPORT_SYMBOL(cxgb4_alloc_sftid
);
3031 /* Release a server TID.
3033 void cxgb4_free_stid(struct tid_info
*t
, unsigned int stid
, int family
)
3035 /* Is it a server filter TID? */
3036 if (t
->nsftids
&& (stid
>= t
->sftid_base
)) {
3037 stid
-= t
->sftid_base
;
3040 stid
-= t
->stid_base
;
3043 spin_lock_bh(&t
->stid_lock
);
3044 if (family
== PF_INET
)
3045 __clear_bit(stid
, t
->stid_bmap
);
3047 bitmap_release_region(t
->stid_bmap
, stid
, 2);
3048 t
->stid_tab
[stid
].data
= NULL
;
3049 if (family
== PF_INET
)
3052 t
->stids_in_use
-= 4;
3053 spin_unlock_bh(&t
->stid_lock
);
3055 EXPORT_SYMBOL(cxgb4_free_stid
);
/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}
3072 * Queue a TID release request and if necessary schedule a work queue to
3075 static void cxgb4_queue_tid_release(struct tid_info
*t
, unsigned int chan
,
3078 void **p
= &t
->tid_tab
[tid
];
3079 struct adapter
*adap
= container_of(t
, struct adapter
, tids
);
3081 spin_lock_bh(&adap
->tid_release_lock
);
3082 *p
= adap
->tid_release_head
;
3083 /* Low 2 bits encode the Tx channel number */
3084 adap
->tid_release_head
= (void **)((uintptr_t)p
| chan
);
3085 if (!adap
->tid_release_task_busy
) {
3086 adap
->tid_release_task_busy
= true;
3087 queue_work(workq
, &adap
->tid_release_task
);
3089 spin_unlock_bh(&adap
->tid_release_lock
);
3093 * Process the list of pending TID release requests.
3095 static void process_tid_release_list(struct work_struct
*work
)
3097 struct sk_buff
*skb
;
3098 struct adapter
*adap
;
3100 adap
= container_of(work
, struct adapter
, tid_release_task
);
3102 spin_lock_bh(&adap
->tid_release_lock
);
3103 while (adap
->tid_release_head
) {
3104 void **p
= adap
->tid_release_head
;
3105 unsigned int chan
= (uintptr_t)p
& 3;
3106 p
= (void *)p
- chan
;
3108 adap
->tid_release_head
= *p
;
3110 spin_unlock_bh(&adap
->tid_release_lock
);
3112 while (!(skb
= alloc_skb(sizeof(struct cpl_tid_release
),
3114 schedule_timeout_uninterruptible(1);
3116 mk_tid_release(skb
, chan
, p
- adap
->tids
.tid_tab
);
3117 t4_ofld_send(adap
, skb
);
3118 spin_lock_bh(&adap
->tid_release_lock
);
3120 adap
->tid_release_task_busy
= false;
3121 spin_unlock_bh(&adap
->tid_release_lock
);
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
3148 * Allocate and initialize the TID tables. Returns 0 on success.
3150 static int tid_init(struct tid_info
*t
)
3153 unsigned int stid_bmap_size
;
3154 unsigned int natids
= t
->natids
;
3155 struct adapter
*adap
= container_of(t
, struct adapter
, tids
);
3157 stid_bmap_size
= BITS_TO_LONGS(t
->nstids
+ t
->nsftids
);
3158 size
= t
->ntids
* sizeof(*t
->tid_tab
) +
3159 natids
* sizeof(*t
->atid_tab
) +
3160 t
->nstids
* sizeof(*t
->stid_tab
) +
3161 t
->nsftids
* sizeof(*t
->stid_tab
) +
3162 stid_bmap_size
* sizeof(long) +
3163 t
->nftids
* sizeof(*t
->ftid_tab
) +
3164 t
->nsftids
* sizeof(*t
->ftid_tab
);
3166 t
->tid_tab
= t4_alloc_mem(size
);
3170 t
->atid_tab
= (union aopen_entry
*)&t
->tid_tab
[t
->ntids
];
3171 t
->stid_tab
= (struct serv_entry
*)&t
->atid_tab
[natids
];
3172 t
->stid_bmap
= (unsigned long *)&t
->stid_tab
[t
->nstids
+ t
->nsftids
];
3173 t
->ftid_tab
= (struct filter_entry
*)&t
->stid_bmap
[stid_bmap_size
];
3174 spin_lock_init(&t
->stid_lock
);
3175 spin_lock_init(&t
->atid_lock
);
3177 t
->stids_in_use
= 0;
3179 t
->atids_in_use
= 0;
3180 atomic_set(&t
->tids_in_use
, 0);
3182 /* Setup the free list for atid_tab and clear the stid bitmap. */
3185 t
->atid_tab
[natids
- 1].next
= &t
->atid_tab
[natids
];
3186 t
->afree
= t
->atid_tab
;
3188 bitmap_zero(t
->stid_bmap
, t
->nstids
+ t
->nsftids
);
3189 /* Reserve stid 0 for T4/T5 adapters */
3190 if (!t
->stid_base
&&
3191 (is_t4(adap
->params
.chip
) || is_t5(adap
->params
.chip
)))
3192 __set_bit(0, t
->stid_bmap
);
3197 static int cxgb4_clip_get(const struct net_device
*dev
,
3198 const struct in6_addr
*lip
)
3200 struct adapter
*adap
;
3201 struct fw_clip_cmd c
;
3203 adap
= netdev2adap(dev
);
3204 memset(&c
, 0, sizeof(c
));
3205 c
.op_to_write
= htonl(FW_CMD_OP(FW_CLIP_CMD
) |
3206 FW_CMD_REQUEST
| FW_CMD_WRITE
);
3207 c
.alloc_to_len16
= htonl(F_FW_CLIP_CMD_ALLOC
| FW_LEN16(c
));
3208 *(__be64
*)&c
.ip_hi
= *(__be64
*)(lip
->s6_addr
);
3209 *(__be64
*)&c
.ip_lo
= *(__be64
*)(lip
->s6_addr
+ 8);
3210 return t4_wr_mbox_meat(adap
, adap
->mbox
, &c
, sizeof(c
), &c
, false);
3213 static int cxgb4_clip_release(const struct net_device
*dev
,
3214 const struct in6_addr
*lip
)
3216 struct adapter
*adap
;
3217 struct fw_clip_cmd c
;
3219 adap
= netdev2adap(dev
);
3220 memset(&c
, 0, sizeof(c
));
3221 c
.op_to_write
= htonl(FW_CMD_OP(FW_CLIP_CMD
) |
3222 FW_CMD_REQUEST
| FW_CMD_READ
);
3223 c
.alloc_to_len16
= htonl(F_FW_CLIP_CMD_FREE
| FW_LEN16(c
));
3224 *(__be64
*)&c
.ip_hi
= *(__be64
*)(lip
->s6_addr
);
3225 *(__be64
*)&c
.ip_lo
= *(__be64
*)(lip
->s6_addr
+ 8);
3226 return t4_wr_mbox_meat(adap
, adap
->mbox
, &c
, sizeof(c
), &c
, false);
/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
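/*
 * A minimal sketch of an upper layer driver opening a hardware listening
 * server on TCP port 3260 (illustrative; error handling and the stid
 * allocation policy are up to the ULD):
 *
 *	int stid = cxgb4_alloc_stid(&adap->tids, PF_INET, my_listen_ctx);
 *	if (stid >= 0)
 *		cxgb4_create_server(dev, stid, sip, htons(3260), 0, rxq_id);
 *
 * where "my_listen_ctx", "sip" and "rxq_id" are hypothetical ULD-side
 * values (listen context, local IPv4 address in network order, and the
 * ingress queue that should receive the passive-open CPLs).
 */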
3271 /* cxgb4_create_server6 - create an IPv6 server
3273 * @stid: the server TID
3274 * @sip: local IPv6 address to bind server to
3275 * @sport: the server's TCP port
3276 * @queue: queue to direct messages from this server to
3278 * Create an IPv6 server for the given port and address.
3279 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3281 int cxgb4_create_server6(const struct net_device
*dev
, unsigned int stid
,
3282 const struct in6_addr
*sip
, __be16 sport
,
3286 struct sk_buff
*skb
;
3287 struct adapter
*adap
;
3288 struct cpl_pass_open_req6
*req
;
3291 skb
= alloc_skb(sizeof(*req
), GFP_KERNEL
);
3295 adap
= netdev2adap(dev
);
3296 req
= (struct cpl_pass_open_req6
*)__skb_put(skb
, sizeof(*req
));
3298 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6
, stid
));
3299 req
->local_port
= sport
;
3300 req
->peer_port
= htons(0);
3301 req
->local_ip_hi
= *(__be64
*)(sip
->s6_addr
);
3302 req
->local_ip_lo
= *(__be64
*)(sip
->s6_addr
+ 8);
3303 req
->peer_ip_hi
= cpu_to_be64(0);
3304 req
->peer_ip_lo
= cpu_to_be64(0);
3305 chan
= rxq_to_chan(&adap
->sge
, queue
);
3306 req
->opt0
= cpu_to_be64(TX_CHAN(chan
));
3307 req
->opt1
= cpu_to_be64(CONN_POLICY_ASK
|
3308 SYN_RSS_ENABLE
| SYN_RSS_QUEUE(queue
));
3309 ret
= t4_mgmt_tx(adap
, skb
);
3310 return net_xmit_eval(ret
);
3312 EXPORT_SYMBOL(cxgb4_create_server6
);
3314 int cxgb4_remove_server(const struct net_device
*dev
, unsigned int stid
,
3315 unsigned int queue
, bool ipv6
)
3317 struct sk_buff
*skb
;
3318 struct adapter
*adap
;
3319 struct cpl_close_listsvr_req
*req
;
3322 adap
= netdev2adap(dev
);
3324 skb
= alloc_skb(sizeof(*req
), GFP_KERNEL
);
3328 req
= (struct cpl_close_listsvr_req
*)__skb_put(skb
, sizeof(*req
));
3330 OPCODE_TID(req
) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ
, stid
));
3331 req
->reply_ctrl
= htons(NO_REPLY(0) | (ipv6
? LISTSVR_IPV6(1) :
3332 LISTSVR_IPV6(0)) | QUEUENO(queue
));
3333 ret
= t4_mgmt_tx(adap
, skb
);
3334 return net_xmit_eval(ret
);
3336 EXPORT_SYMBOL(cxgb4_remove_server
);
/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
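/*
 * Worked example (illustrative): the HW MTU table is sorted ascending, so
 * for a path MTU of 1460 the loop above advances while the next entry still
 * fits and returns the largest table value that does not exceed 1460:
 *
 *	unsigned int idx;
 *	unsigned int mtu = cxgb4_best_mtu(adap->params.mtus, 1460, &idx);
 *	// mtu holds the chosen table value, idx its position in the table
 */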
/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);
3373 unsigned int cxgb4_dbfifo_count(const struct net_device
*dev
, int lpfifo
)
3375 struct adapter
*adap
= netdev2adap(dev
);
3376 u32 v1
, v2
, lp_count
, hp_count
;
3378 v1
= t4_read_reg(adap
, A_SGE_DBFIFO_STATUS
);
3379 v2
= t4_read_reg(adap
, SGE_DBFIFO_STATUS2
);
3380 if (is_t4(adap
->params
.chip
)) {
3381 lp_count
= G_LP_COUNT(v1
);
3382 hp_count
= G_HP_COUNT(v1
);
3384 lp_count
= G_LP_COUNT_T5(v1
);
3385 hp_count
= G_HP_COUNT_T5(v2
);
3387 return lpfifo
? lp_count
: hp_count
;
3389 EXPORT_SYMBOL(cxgb4_dbfifo_count
);
/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
		     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);
3438 int cxgb4_flush_eq_cache(struct net_device
*dev
)
3440 struct adapter
*adap
= netdev2adap(dev
);
3443 ret
= t4_fwaddrspace_write(adap
, adap
->mbox
,
3444 0xe1000000 + A_SGE_CTXT_CMD
, 0x20000000);
3447 EXPORT_SYMBOL(cxgb4_flush_eq_cache
);
3449 static int read_eq_indices(struct adapter
*adap
, u16 qid
, u16
*pidx
, u16
*cidx
)
3451 u32 addr
= t4_read_reg(adap
, A_SGE_DBQ_CTXT_BADDR
) + 24 * qid
+ 8;
3455 ret
= t4_mem_win_read_len(adap
, addr
, (__be32
*)&indices
, 8);
3457 *cidx
= (be64_to_cpu(indices
) >> 25) & 0xffff;
3458 *pidx
= (be64_to_cpu(indices
) >> 9) & 0xffff;
3463 int cxgb4_sync_txq_pidx(struct net_device
*dev
, u16 qid
, u16 pidx
,
3466 struct adapter
*adap
= netdev2adap(dev
);
3467 u16 hw_pidx
, hw_cidx
;
3470 ret
= read_eq_indices(adap
, qid
, &hw_pidx
, &hw_cidx
);
3474 if (pidx
!= hw_pidx
) {
3477 if (pidx
>= hw_pidx
)
3478 delta
= pidx
- hw_pidx
;
3480 delta
= size
- hw_pidx
+ pidx
;
3482 t4_write_reg(adap
, MYPF_REG(SGE_PF_KDOORBELL
),
3483 QID(qid
) | PIDX(delta
));
3488 EXPORT_SYMBOL(cxgb4_sync_txq_pidx
);
3490 void cxgb4_disable_db_coalescing(struct net_device
*dev
)
3492 struct adapter
*adap
;
3494 adap
= netdev2adap(dev
);
3495 t4_set_reg_field(adap
, A_SGE_DOORBELL_CONTROL
, F_NOCOALESCE
,
3498 EXPORT_SYMBOL(cxgb4_disable_db_coalescing
);
3500 void cxgb4_enable_db_coalescing(struct net_device
*dev
)
3502 struct adapter
*adap
;
3504 adap
= netdev2adap(dev
);
3505 t4_set_reg_field(adap
, A_SGE_DOORBELL_CONTROL
, F_NOCOALESCE
, 0);
3507 EXPORT_SYMBOL(cxgb4_enable_db_coalescing
);
3509 static struct pci_driver cxgb4_driver
;
3511 static void check_neigh_update(struct neighbour
*neigh
)
3513 const struct device
*parent
;
3514 const struct net_device
*netdev
= neigh
->dev
;
3516 if (netdev
->priv_flags
& IFF_802_1Q_VLAN
)
3517 netdev
= vlan_dev_real_dev(netdev
);
3518 parent
= netdev
->dev
.parent
;
3519 if (parent
&& parent
->driver
== &cxgb4_driver
.driver
)
3520 t4_l2t_update(dev_get_drvdata(parent
), neigh
);
3523 static int netevent_cb(struct notifier_block
*nb
, unsigned long event
,
3527 case NETEVENT_NEIGH_UPDATE
:
3528 check_neigh_update(data
);
3530 case NETEVENT_REDIRECT
:
3537 static bool netevent_registered
;
3538 static struct notifier_block cxgb4_netevent_nb
= {
3539 .notifier_call
= netevent_cb
3542 static void drain_db_fifo(struct adapter
*adap
, int usecs
)
3544 u32 v1
, v2
, lp_count
, hp_count
;
3547 v1
= t4_read_reg(adap
, A_SGE_DBFIFO_STATUS
);
3548 v2
= t4_read_reg(adap
, SGE_DBFIFO_STATUS2
);
3549 if (is_t4(adap
->params
.chip
)) {
3550 lp_count
= G_LP_COUNT(v1
);
3551 hp_count
= G_HP_COUNT(v1
);
3553 lp_count
= G_LP_COUNT_T5(v1
);
3554 hp_count
= G_HP_COUNT_T5(v2
);
3557 if (lp_count
== 0 && hp_count
== 0)
3559 set_current_state(TASK_UNINTERRUPTIBLE
);
3560 schedule_timeout(usecs_to_jiffies(usecs
));
3564 static void disable_txq_db(struct sge_txq
*q
)
3566 spin_lock_irq(&q
->db_lock
);
3568 spin_unlock_irq(&q
->db_lock
);
3571 static void enable_txq_db(struct sge_txq
*q
)
3573 spin_lock_irq(&q
->db_lock
);
3575 spin_unlock_irq(&q
->db_lock
);
3578 static void disable_dbs(struct adapter
*adap
)
3582 for_each_ethrxq(&adap
->sge
, i
)
3583 disable_txq_db(&adap
->sge
.ethtxq
[i
].q
);
3584 for_each_ofldrxq(&adap
->sge
, i
)
3585 disable_txq_db(&adap
->sge
.ofldtxq
[i
].q
);
3586 for_each_port(adap
, i
)
3587 disable_txq_db(&adap
->sge
.ctrlq
[i
].q
);
3590 static void enable_dbs(struct adapter
*adap
)
3594 for_each_ethrxq(&adap
->sge
, i
)
3595 enable_txq_db(&adap
->sge
.ethtxq
[i
].q
);
3596 for_each_ofldrxq(&adap
->sge
, i
)
3597 enable_txq_db(&adap
->sge
.ofldtxq
[i
].q
);
3598 for_each_port(adap
, i
)
3599 enable_txq_db(&adap
->sge
.ctrlq
[i
].q
);
3602 static void sync_txq_pidx(struct adapter
*adap
, struct sge_txq
*q
)
3604 u16 hw_pidx
, hw_cidx
;
3607 spin_lock_bh(&q
->db_lock
);
3608 ret
= read_eq_indices(adap
, (u16
)q
->cntxt_id
, &hw_pidx
, &hw_cidx
);
3611 if (q
->db_pidx
!= hw_pidx
) {
3614 if (q
->db_pidx
>= hw_pidx
)
3615 delta
= q
->db_pidx
- hw_pidx
;
3617 delta
= q
->size
- hw_pidx
+ q
->db_pidx
;
3619 t4_write_reg(adap
, MYPF_REG(SGE_PF_KDOORBELL
),
3620 QID(q
->cntxt_id
) | PIDX(delta
));
3624 spin_unlock_bh(&q
->db_lock
);
3626 CH_WARN(adap
, "DB drop recovery failed.\n");
3628 static void recover_all_queues(struct adapter
*adap
)
3632 for_each_ethrxq(&adap
->sge
, i
)
3633 sync_txq_pidx(adap
, &adap
->sge
.ethtxq
[i
].q
);
3634 for_each_ofldrxq(&adap
->sge
, i
)
3635 sync_txq_pidx(adap
, &adap
->sge
.ofldtxq
[i
].q
);
3636 for_each_port(adap
, i
)
3637 sync_txq_pidx(adap
, &adap
->sge
.ctrlq
[i
].q
);
3640 static void notify_rdma_uld(struct adapter
*adap
, enum cxgb4_control cmd
)
3642 mutex_lock(&uld_mutex
);
3643 if (adap
->uld_handle
[CXGB4_ULD_RDMA
])
3644 ulds
[CXGB4_ULD_RDMA
].control(adap
->uld_handle
[CXGB4_ULD_RDMA
],
3646 mutex_unlock(&uld_mutex
);
3649 static void process_db_full(struct work_struct
*work
)
3651 struct adapter
*adap
;
3653 adap
= container_of(work
, struct adapter
, db_full_task
);
3655 notify_rdma_uld(adap
, CXGB4_CONTROL_DB_FULL
);
3656 drain_db_fifo(adap
, dbfifo_drain_delay
);
3657 t4_set_reg_field(adap
, SGE_INT_ENABLE3
,
3658 DBFIFO_HP_INT
| DBFIFO_LP_INT
,
3659 DBFIFO_HP_INT
| DBFIFO_LP_INT
);
3660 notify_rdma_uld(adap
, CXGB4_CONTROL_DB_EMPTY
);
3663 static void process_db_drop(struct work_struct
*work
)
3665 struct adapter
*adap
;
3667 adap
= container_of(work
, struct adapter
, db_drop_task
);
3669 if (is_t4(adap
->params
.chip
)) {
3671 notify_rdma_uld(adap
, CXGB4_CONTROL_DB_DROP
);
3672 drain_db_fifo(adap
, 1);
3673 recover_all_queues(adap
);
3676 u32 dropped_db
= t4_read_reg(adap
, 0x010ac);
3677 u16 qid
= (dropped_db
>> 15) & 0x1ffff;
3678 u16 pidx_inc
= dropped_db
& 0x1fff;
3680 unsigned short udb_density
;
3681 unsigned long qpshift
;
3685 dev_warn(adap
->pdev_dev
,
3686 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3688 (dropped_db
>> 14) & 1,
3689 (dropped_db
>> 13) & 1,
3692 drain_db_fifo(adap
, 1);
3694 s_qpp
= QUEUESPERPAGEPF1
* adap
->fn
;
3695 udb_density
= 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap
,
3696 SGE_EGRESS_QUEUES_PER_PAGE_PF
) >> s_qpp
);
3697 qpshift
= PAGE_SHIFT
- ilog2(udb_density
);
3698 udb
= qid
<< qpshift
;
3700 page
= udb
/ PAGE_SIZE
;
3701 udb
+= (qid
- (page
* udb_density
)) * 128;
3703 writel(PIDX(pidx_inc
), adap
->bar2
+ udb
+ 8);
3705 /* Re-enable BAR2 WC */
3706 t4_set_reg_field(adap
, 0x10b0, 1<<15, 1<<15);
3709 t4_set_reg_field(adap
, A_SGE_DOORBELL_CONTROL
, F_DROPPED_DB
, 0);
3712 void t4_db_full(struct adapter
*adap
)
3714 if (is_t4(adap
->params
.chip
)) {
3715 t4_set_reg_field(adap
, SGE_INT_ENABLE3
,
3716 DBFIFO_HP_INT
| DBFIFO_LP_INT
, 0);
3717 queue_work(workq
, &adap
->db_full_task
);
3721 void t4_db_dropped(struct adapter
*adap
)
3723 if (is_t4(adap
->params
.chip
))
3724 queue_work(workq
, &adap
->db_drop_task
);
3727 static void uld_attach(struct adapter
*adap
, unsigned int uld
)
3730 struct cxgb4_lld_info lli
;
3733 lli
.pdev
= adap
->pdev
;
3734 lli
.l2t
= adap
->l2t
;
3735 lli
.tids
= &adap
->tids
;
3736 lli
.ports
= adap
->port
;
3737 lli
.vr
= &adap
->vres
;
3738 lli
.mtus
= adap
->params
.mtus
;
3739 if (uld
== CXGB4_ULD_RDMA
) {
3740 lli
.rxq_ids
= adap
->sge
.rdma_rxq
;
3741 lli
.nrxq
= adap
->sge
.rdmaqs
;
3742 } else if (uld
== CXGB4_ULD_ISCSI
) {
3743 lli
.rxq_ids
= adap
->sge
.ofld_rxq
;
3744 lli
.nrxq
= adap
->sge
.ofldqsets
;
3746 lli
.ntxq
= adap
->sge
.ofldqsets
;
3747 lli
.nchan
= adap
->params
.nports
;
3748 lli
.nports
= adap
->params
.nports
;
3749 lli
.wr_cred
= adap
->params
.ofldq_wr_cred
;
3750 lli
.adapter_type
= adap
->params
.chip
;
3751 lli
.iscsi_iolen
= MAXRXDATA_GET(t4_read_reg(adap
, TP_PARA_REG2
));
3752 lli
.udb_density
= 1 << QUEUESPERPAGEPF0_GET(
3753 t4_read_reg(adap
, SGE_EGRESS_QUEUES_PER_PAGE_PF
) >>
3755 lli
.ucq_density
= 1 << QUEUESPERPAGEPF0_GET(
3756 t4_read_reg(adap
, SGE_INGRESS_QUEUES_PER_PAGE_PF
) >>
3758 lli
.filt_mode
= adap
->params
.tp
.vlan_pri_map
;
3759 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3760 for (i
= 0; i
< NCHAN
; i
++)
3762 lli
.gts_reg
= adap
->regs
+ MYPF_REG(SGE_PF_GTS
);
3763 lli
.db_reg
= adap
->regs
+ MYPF_REG(SGE_PF_KDOORBELL
);
3764 lli
.fw_vers
= adap
->params
.fw_vers
;
3765 lli
.dbfifo_int_thresh
= dbfifo_int_thresh
;
3766 lli
.sge_pktshift
= adap
->sge
.pktshift
;
3767 lli
.enable_fw_ofld_conn
= adap
->flags
& FW_OFLD_CONN
;
3769 handle
= ulds
[uld
].add(&lli
);
3770 if (IS_ERR(handle
)) {
3771 dev_warn(adap
->pdev_dev
,
3772 "could not attach to the %s driver, error %ld\n",
3773 uld_str
[uld
], PTR_ERR(handle
));
3777 adap
->uld_handle
[uld
] = handle
;
3779 if (!netevent_registered
) {
3780 register_netevent_notifier(&cxgb4_netevent_nb
);
3781 netevent_registered
= true;
3784 if (adap
->flags
& FULL_INIT_DONE
)
3785 ulds
[uld
].state_change(handle
, CXGB4_STATE_UP
);
3788 static void attach_ulds(struct adapter
*adap
)
3792 spin_lock(&adap_rcu_lock
);
3793 list_add_tail_rcu(&adap
->rcu_node
, &adap_rcu_list
);
3794 spin_unlock(&adap_rcu_lock
);
3796 mutex_lock(&uld_mutex
);
3797 list_add_tail(&adap
->list_node
, &adapter_list
);
3798 for (i
= 0; i
< CXGB4_ULD_MAX
; i
++)
3800 uld_attach(adap
, i
);
3801 mutex_unlock(&uld_mutex
);
3804 static void detach_ulds(struct adapter
*adap
)
3808 mutex_lock(&uld_mutex
);
3809 list_del(&adap
->list_node
);
3810 for (i
= 0; i
< CXGB4_ULD_MAX
; i
++)
3811 if (adap
->uld_handle
[i
]) {
3812 ulds
[i
].state_change(adap
->uld_handle
[i
],
3813 CXGB4_STATE_DETACH
);
3814 adap
->uld_handle
[i
] = NULL
;
3816 if (netevent_registered
&& list_empty(&adapter_list
)) {
3817 unregister_netevent_notifier(&cxgb4_netevent_nb
);
3818 netevent_registered
= false;
3820 mutex_unlock(&uld_mutex
);
3822 spin_lock(&adap_rcu_lock
);
3823 list_del_rcu(&adap
->rcu_node
);
3824 spin_unlock(&adap_rcu_lock
);
3827 static void notify_ulds(struct adapter
*adap
, enum cxgb4_state new_state
)
3831 mutex_lock(&uld_mutex
);
3832 for (i
= 0; i
< CXGB4_ULD_MAX
; i
++)
3833 if (adap
->uld_handle
[i
])
3834 ulds
[i
].state_change(adap
->uld_handle
[i
], new_state
);
3835 mutex_unlock(&uld_mutex
);
3839 * cxgb4_register_uld - register an upper-layer driver
3840 * @type: the ULD type
3841 * @p: the ULD methods
3843 * Registers an upper-layer driver with this driver and notifies the ULD
3844 * about any presently available devices that support its type. Returns
3845 * %-EBUSY if a ULD of the same type is already registered.
3847 int cxgb4_register_uld(enum cxgb4_uld type
, const struct cxgb4_uld_info
*p
)
3850 struct adapter
*adap
;
3852 if (type
>= CXGB4_ULD_MAX
)
3854 mutex_lock(&uld_mutex
);
3855 if (ulds
[type
].add
) {
3860 list_for_each_entry(adap
, &adapter_list
, list_node
)
3861 uld_attach(adap
, type
);
3862 out
: mutex_unlock(&uld_mutex
);
3865 EXPORT_SYMBOL(cxgb4_register_uld
);
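/*
 * A minimal sketch of how an upper layer driver (e.g. an iSCSI or RDMA
 * module) hooks in (illustrative; the callbacks shown are hypothetical ULD
 * functions, only the cxgb4_uld_info fields and the register call are real):
 *
 *	static struct cxgb4_uld_info my_uld_info = {
 *		.name         = "my_uld",
 *		.add          = my_uld_add,          // called once per adapter
 *		.rx_handler   = my_uld_rx_handler,
 *		.state_change = my_uld_state_change,
 *	};
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 */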
3868 * cxgb4_unregister_uld - unregister an upper-layer driver
3869 * @type: the ULD type
3871 * Unregisters an existing upper-layer driver.
3873 int cxgb4_unregister_uld(enum cxgb4_uld type
)
3875 struct adapter
*adap
;
3877 if (type
>= CXGB4_ULD_MAX
)
3879 mutex_lock(&uld_mutex
);
3880 list_for_each_entry(adap
, &adapter_list
, list_node
)
3881 adap
->uld_handle
[type
] = NULL
;
3882 ulds
[type
].add
= NULL
;
3883 mutex_unlock(&uld_mutex
);
3886 EXPORT_SYMBOL(cxgb4_unregister_uld
);
3888 /* Check if netdev on which event is occured belongs to us or not. Return
3889 * suceess (1) if it belongs otherwise failure (0).
3891 static int cxgb4_netdev(struct net_device
*netdev
)
3893 struct adapter
*adap
;
3896 spin_lock(&adap_rcu_lock
);
3897 list_for_each_entry_rcu(adap
, &adap_rcu_list
, rcu_node
)
3898 for (i
= 0; i
< MAX_NPORTS
; i
++)
3899 if (adap
->port
[i
] == netdev
) {
3900 spin_unlock(&adap_rcu_lock
);
3903 spin_unlock(&adap_rcu_lock
);
3907 static int clip_add(struct net_device
*event_dev
, struct inet6_ifaddr
*ifa
,
3908 unsigned long event
)
3910 int ret
= NOTIFY_DONE
;
3913 if (cxgb4_netdev(event_dev
)) {
3916 ret
= cxgb4_clip_get(event_dev
,
3917 (const struct in6_addr
*)ifa
->addr
.s6_addr
);
3925 cxgb4_clip_release(event_dev
,
3926 (const struct in6_addr
*)ifa
->addr
.s6_addr
);
3937 static int cxgb4_inet6addr_handler(struct notifier_block
*this,
3938 unsigned long event
, void *data
)
3940 struct inet6_ifaddr
*ifa
= data
;
3941 struct net_device
*event_dev
;
3942 int ret
= NOTIFY_DONE
;
3943 struct bonding
*bond
= netdev_priv(ifa
->idev
->dev
);
3944 struct list_head
*iter
;
3945 struct slave
*slave
;
3946 struct pci_dev
*first_pdev
= NULL
;
3948 if (ifa
->idev
->dev
->priv_flags
& IFF_802_1Q_VLAN
) {
3949 event_dev
= vlan_dev_real_dev(ifa
->idev
->dev
);
3950 ret
= clip_add(event_dev
, ifa
, event
);
3951 } else if (ifa
->idev
->dev
->flags
& IFF_MASTER
) {
3952 /* It is possible that two different adapters are bonded in one
3953 * bond. We need to find such different adapters and add clip
3954 * in all of them only once.
3956 read_lock(&bond
->lock
);
3957 bond_for_each_slave(bond
, slave
, iter
) {
3959 ret
= clip_add(slave
->dev
, ifa
, event
);
3960 /* If clip_add is success then only initialize
3961 * first_pdev since it means it is our device
3963 if (ret
== NOTIFY_OK
)
3964 first_pdev
= to_pci_dev(
3965 slave
->dev
->dev
.parent
);
3966 } else if (first_pdev
!=
3967 to_pci_dev(slave
->dev
->dev
.parent
))
3968 ret
= clip_add(slave
->dev
, ifa
, event
);
3970 read_unlock(&bond
->lock
);
3972 ret
= clip_add(ifa
->idev
->dev
, ifa
, event
);
3977 static struct notifier_block cxgb4_inet6addr_notifier
= {
3978 .notifier_call
= cxgb4_inet6addr_handler
3981 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
3982 * a physical device.
3983 * The physical device reference is needed to send the actul CLIP command.
3985 static int update_dev_clip(struct net_device
*root_dev
, struct net_device
*dev
)
3987 struct inet6_dev
*idev
= NULL
;
3988 struct inet6_ifaddr
*ifa
;
3991 idev
= __in6_dev_get(root_dev
);
3995 read_lock_bh(&idev
->lock
);
3996 list_for_each_entry(ifa
, &idev
->addr_list
, if_list
) {
3997 ret
= cxgb4_clip_get(dev
,
3998 (const struct in6_addr
*)ifa
->addr
.s6_addr
);
4002 read_unlock_bh(&idev
->lock
);
4007 static int update_root_dev_clip(struct net_device
*dev
)
4009 struct net_device
*root_dev
= NULL
;
4012 /* First populate the real net device's IPv6 addresses */
4013 ret
= update_dev_clip(dev
, dev
);
4017 /* Parse all bond and vlan devices layered on top of the physical dev */
4018 for (i
= 0; i
< VLAN_N_VID
; i
++) {
4019 root_dev
= __vlan_find_dev_deep(dev
, htons(ETH_P_8021Q
), i
);
4023 ret
= update_dev_clip(root_dev
, dev
);
4030 static void update_clip(const struct adapter
*adap
)
4033 struct net_device
*dev
;
4038 for (i
= 0; i
< MAX_NPORTS
; i
++) {
4039 dev
= adap
->port
[i
];
4043 ret
= update_root_dev_clip(dev
);
4052 * cxgb_up - enable the adapter
4053 * @adap: adapter being enabled
4055 * Called when the first port is enabled, this function performs the
4056 * actions necessary to make an adapter operational, such as completing
4057 * the initialization of HW modules, and enabling interrupts.
4059 * Must be called with the rtnl lock held.
4061 static int cxgb_up(struct adapter
*adap
)
4065 err
= setup_sge_queues(adap
);
4068 err
= setup_rss(adap
);
4072 if (adap
->flags
& USING_MSIX
) {
4073 name_msix_vecs(adap
);
4074 err
= request_irq(adap
->msix_info
[0].vec
, t4_nondata_intr
, 0,
4075 adap
->msix_info
[0].desc
, adap
);
4079 err
= request_msix_queue_irqs(adap
);
4081 free_irq(adap
->msix_info
[0].vec
, adap
);
4085 err
= request_irq(adap
->pdev
->irq
, t4_intr_handler(adap
),
4086 (adap
->flags
& USING_MSI
) ? 0 : IRQF_SHARED
,
4087 adap
->port
[0]->name
, adap
);
4093 t4_intr_enable(adap
);
4094 adap
->flags
|= FULL_INIT_DONE
;
4095 notify_ulds(adap
, CXGB4_STATE_UP
);
4100 dev_err(adap
->pdev_dev
, "request_irq failed, err %d\n", err
);
4102 t4_free_sge_resources(adap
);
4106 static void cxgb_down(struct adapter
*adapter
)
4108 t4_intr_disable(adapter
);
4109 cancel_work_sync(&adapter
->tid_release_task
);
4110 cancel_work_sync(&adapter
->db_full_task
);
4111 cancel_work_sync(&adapter
->db_drop_task
);
4112 adapter
->tid_release_task_busy
= false;
4113 adapter
->tid_release_head
= NULL
;
4115 if (adapter
->flags
& USING_MSIX
) {
4116 free_msix_queue_irqs(adapter
);
4117 free_irq(adapter
->msix_info
[0].vec
, adapter
);
4119 free_irq(adapter
->pdev
->irq
, adapter
);
4120 quiesce_rx(adapter
);
4121 t4_sge_stop(adapter
);
4122 t4_free_sge_resources(adapter
);
4123 adapter
->flags
&= ~FULL_INIT_DONE
;
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}
4159 /* Return an error number if the indicated filter isn't writable ...
4161 static int writable_filter(struct filter_entry
*f
)
4171 /* Delete the filter at the specified index (if valid). The checks for all
4172 * the common problems with doing this like the filter being locked, currently
4173 * pending in another operation, etc.
4175 static int delete_filter(struct adapter
*adapter
, unsigned int fidx
)
4177 struct filter_entry
*f
;
4180 if (fidx
>= adapter
->tids
.nftids
+ adapter
->tids
.nsftids
)
4183 f
= &adapter
->tids
.ftid_tab
[fidx
];
4184 ret
= writable_filter(f
);
4188 return del_filter_wr(adapter
, fidx
);
4193 int cxgb4_create_server_filter(const struct net_device
*dev
, unsigned int stid
,
4194 __be32 sip
, __be16 sport
, __be16 vlan
,
4195 unsigned int queue
, unsigned char port
, unsigned char mask
)
4198 struct filter_entry
*f
;
4199 struct adapter
*adap
;
4203 adap
= netdev2adap(dev
);
4205 /* Adjust stid to correct filter index */
4206 stid
-= adap
->tids
.sftid_base
;
4207 stid
+= adap
->tids
.nftids
;
4209 /* Check to make sure the filter requested is writable ...
4211 f
= &adap
->tids
.ftid_tab
[stid
];
4212 ret
= writable_filter(f
);
4216 /* Clear out any old resources being used by the filter before
4217 * we start constructing the new filter.
4220 clear_filter(adap
, f
);
4222 /* Clear out filter specifications */
4223 memset(&f
->fs
, 0, sizeof(struct ch_filter_specification
));
4224 f
->fs
.val
.lport
= cpu_to_be16(sport
);
4225 f
->fs
.mask
.lport
= ~0;
4227 if ((val
[0] | val
[1] | val
[2] | val
[3]) != 0) {
4228 for (i
= 0; i
< 4; i
++) {
4229 f
->fs
.val
.lip
[i
] = val
[i
];
4230 f
->fs
.mask
.lip
[i
] = ~0;
4232 if (adap
->params
.tp
.vlan_pri_map
& F_PORT
) {
4233 f
->fs
.val
.iport
= port
;
4234 f
->fs
.mask
.iport
= mask
;
4238 if (adap
->params
.tp
.vlan_pri_map
& F_PROTOCOL
) {
4239 f
->fs
.val
.proto
= IPPROTO_TCP
;
4240 f
->fs
.mask
.proto
= ~0;
4245 /* Mark filter as locked */
4249 ret
= set_filter_wr(adap
, stid
);
4251 clear_filter(adap
, f
);
4257 EXPORT_SYMBOL(cxgb4_create_server_filter
);
4259 int cxgb4_remove_server_filter(const struct net_device
*dev
, unsigned int stid
,
4260 unsigned int queue
, bool ipv6
)
4263 struct filter_entry
*f
;
4264 struct adapter
*adap
;
4266 adap
= netdev2adap(dev
);
4268 /* Adjust stid to correct filter index */
4269 stid
-= adap
->tids
.sftid_base
;
4270 stid
+= adap
->tids
.nftids
;
4272 f
= &adap
->tids
.ftid_tab
[stid
];
4273 /* Unlock the filter */
4276 ret
= delete_filter(adap
, stid
);
4282 EXPORT_SYMBOL(cxgb4_remove_server_filter
);
4284 static struct rtnl_link_stats64
*cxgb_get_stats(struct net_device
*dev
,
4285 struct rtnl_link_stats64
*ns
)
4287 struct port_stats stats
;
4288 struct port_info
*p
= netdev_priv(dev
);
4289 struct adapter
*adapter
= p
->adapter
;
4291 /* Block retrieving statistics during EEH error
4292 * recovery. Otherwise, the recovery might fail
4293 * and the PCI device will be removed permanently
4295 spin_lock(&adapter
->stats_lock
);
4296 if (!netif_device_present(dev
)) {
4297 spin_unlock(&adapter
->stats_lock
);
4300 t4_get_port_stats(adapter
, p
->tx_chan
, &stats
);
4301 spin_unlock(&adapter
->stats_lock
);
4303 ns
->tx_bytes
= stats
.tx_octets
;
4304 ns
->tx_packets
= stats
.tx_frames
;
4305 ns
->rx_bytes
= stats
.rx_octets
;
4306 ns
->rx_packets
= stats
.rx_frames
;
4307 ns
->multicast
= stats
.rx_mcast_frames
;
4309 /* detailed rx_errors */
4310 ns
->rx_length_errors
= stats
.rx_jabber
+ stats
.rx_too_long
+
4312 ns
->rx_over_errors
= 0;
4313 ns
->rx_crc_errors
= stats
.rx_fcs_err
;
4314 ns
->rx_frame_errors
= stats
.rx_symbol_err
;
4315 ns
->rx_fifo_errors
= stats
.rx_ovflow0
+ stats
.rx_ovflow1
+
4316 stats
.rx_ovflow2
+ stats
.rx_ovflow3
+
4317 stats
.rx_trunc0
+ stats
.rx_trunc1
+
4318 stats
.rx_trunc2
+ stats
.rx_trunc3
;
4319 ns
->rx_missed_errors
= 0;
4321 /* detailed tx_errors */
4322 ns
->tx_aborted_errors
= 0;
4323 ns
->tx_carrier_errors
= 0;
4324 ns
->tx_fifo_errors
= 0;
4325 ns
->tx_heartbeat_errors
= 0;
4326 ns
->tx_window_errors
= 0;
4328 ns
->tx_errors
= stats
.tx_error_frames
;
4329 ns
->rx_errors
= stats
.rx_symbol_err
+ stats
.rx_fcs_err
+
4330 ns
->rx_length_errors
+ stats
.rx_len_err
+ ns
->rx_fifo_errors
;
4334 static int cxgb_ioctl(struct net_device
*dev
, struct ifreq
*req
, int cmd
)
4337 int ret
= 0, prtad
, devad
;
4338 struct port_info
*pi
= netdev_priv(dev
);
4339 struct mii_ioctl_data
*data
= (struct mii_ioctl_data
*)&req
->ifr_data
;
4343 if (pi
->mdio_addr
< 0)
4345 data
->phy_id
= pi
->mdio_addr
;
4349 if (mdio_phy_id_is_c45(data
->phy_id
)) {
4350 prtad
= mdio_phy_id_prtad(data
->phy_id
);
4351 devad
= mdio_phy_id_devad(data
->phy_id
);
4352 } else if (data
->phy_id
< 32) {
4353 prtad
= data
->phy_id
;
4355 data
->reg_num
&= 0x1f;
4359 mbox
= pi
->adapter
->fn
;
4360 if (cmd
== SIOCGMIIREG
)
4361 ret
= t4_mdio_rd(pi
->adapter
, mbox
, prtad
, devad
,
4362 data
->reg_num
, &data
->val_out
);
4364 ret
= t4_mdio_wr(pi
->adapter
, mbox
, prtad
, devad
,
4365 data
->reg_num
, data
->val_in
);
4373 static void cxgb_set_rxmode(struct net_device
*dev
)
4375 /* unfortunately we can't return errors to the stack */
4376 set_rxmode(dev
, -1, false);
4379 static int cxgb_change_mtu(struct net_device
*dev
, int new_mtu
)
4382 struct port_info
*pi
= netdev_priv(dev
);
4384 if (new_mtu
< 81 || new_mtu
> MAX_MTU
) /* accommodate SACK */
4386 ret
= t4_set_rxmode(pi
->adapter
, pi
->adapter
->fn
, pi
->viid
, new_mtu
, -1,
4393 static int cxgb_set_mac_addr(struct net_device
*dev
, void *p
)
4396 struct sockaddr
*addr
= p
;
4397 struct port_info
*pi
= netdev_priv(dev
);
4399 if (!is_valid_ether_addr(addr
->sa_data
))
4400 return -EADDRNOTAVAIL
;
4402 ret
= t4_change_mac(pi
->adapter
, pi
->adapter
->fn
, pi
->viid
,
4403 pi
->xact_addr_filt
, addr
->sa_data
, true, true);
4407 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
4408 pi
->xact_addr_filt
= ret
;
4412 #ifdef CONFIG_NET_POLL_CONTROLLER
4413 static void cxgb_netpoll(struct net_device
*dev
)
4415 struct port_info
*pi
= netdev_priv(dev
);
4416 struct adapter
*adap
= pi
->adapter
;
4418 if (adap
->flags
& USING_MSIX
) {
4420 struct sge_eth_rxq
*rx
= &adap
->sge
.ethrxq
[pi
->first_qset
];
4422 for (i
= pi
->nqsets
; i
; i
--, rx
++)
4423 t4_sge_intr_msix(0, &rx
->rspq
);
4425 t4_intr_handler(adap
)(0, adap
);
static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};
void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
4452 static void setup_memwin(struct adapter
*adap
)
4454 u32 bar0
, mem_win0_base
, mem_win1_base
, mem_win2_base
;
4456 bar0
= pci_resource_start(adap
->pdev
, 0); /* truncation intentional */
4457 if (is_t4(adap
->params
.chip
)) {
4458 mem_win0_base
= bar0
+ MEMWIN0_BASE
;
4459 mem_win1_base
= bar0
+ MEMWIN1_BASE
;
4460 mem_win2_base
= bar0
+ MEMWIN2_BASE
;
4462 /* For T5, only relative offset inside the PCIe BAR is passed */
4463 mem_win0_base
= MEMWIN0_BASE
;
4464 mem_win1_base
= MEMWIN1_BASE_T5
;
4465 mem_win2_base
= MEMWIN2_BASE_T5
;
4467 t4_write_reg(adap
, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN
, 0),
4468 mem_win0_base
| BIR(0) |
4469 WINDOW(ilog2(MEMWIN0_APERTURE
) - 10));
4470 t4_write_reg(adap
, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN
, 1),
4471 mem_win1_base
| BIR(0) |
4472 WINDOW(ilog2(MEMWIN1_APERTURE
) - 10));
4473 t4_write_reg(adap
, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN
, 2),
4474 mem_win2_base
| BIR(0) |
4475 WINDOW(ilog2(MEMWIN2_APERTURE
) - 10));
4478 static void setup_memwin_rdma(struct adapter
*adap
)
4480 if (adap
->vres
.ocq
.size
) {
4481 unsigned int start
, sz_kb
;
4483 start
= pci_resource_start(adap
->pdev
, 2) +
4484 OCQ_WIN_OFFSET(adap
->pdev
, &adap
->vres
);
4485 sz_kb
= roundup_pow_of_two(adap
->vres
.ocq
.size
) >> 10;
4487 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN
, 3),
4488 start
| BIR(1) | WINDOW(ilog2(sz_kb
)));
4490 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET
, 3),
4491 adap
->vres
.ocq
.start
);
4493 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET
, 3));
4497 static int adap_init1(struct adapter
*adap
, struct fw_caps_config_cmd
*c
)
4502 /* get device capabilities */
4503 memset(c
, 0, sizeof(*c
));
4504 c
->op_to_write
= htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
4505 FW_CMD_REQUEST
| FW_CMD_READ
);
4506 c
->cfvalid_to_len16
= htonl(FW_LEN16(*c
));
4507 ret
= t4_wr_mbox(adap
, adap
->fn
, c
, sizeof(*c
), c
);
4511 /* select capabilities we'll be using */
4512 if (c
->niccaps
& htons(FW_CAPS_CONFIG_NIC_VM
)) {
4514 c
->niccaps
^= htons(FW_CAPS_CONFIG_NIC_VM
);
4516 c
->niccaps
= htons(FW_CAPS_CONFIG_NIC_VM
);
4517 } else if (vf_acls
) {
4518 dev_err(adap
->pdev_dev
, "virtualization ACLs not supported");
4521 c
->op_to_write
= htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
4522 FW_CMD_REQUEST
| FW_CMD_WRITE
);
4523 ret
= t4_wr_mbox(adap
, adap
->fn
, c
, sizeof(*c
), NULL
);
4527 ret
= t4_config_glbl_rss(adap
, adap
->fn
,
4528 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL
,
4529 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN
|
4530 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP
);
4534 ret
= t4_cfg_pfvf(adap
, adap
->fn
, adap
->fn
, 0, MAX_EGRQ
, 64, MAX_INGQ
,
4535 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF
, FW_CMD_CAP_PF
);
4541 /* tweak some settings */
4542 t4_write_reg(adap
, TP_SHIFT_CNT
, 0x64f8849);
4543 t4_write_reg(adap
, ULP_RX_TDDP_PSZ
, HPZ0(PAGE_SHIFT
- 12));
4544 t4_write_reg(adap
, TP_PIO_ADDR
, TP_INGRESS_CONFIG
);
4545 v
= t4_read_reg(adap
, TP_PIO_DATA
);
4546 t4_write_reg(adap
, TP_PIO_DATA
, v
& ~CSUM_HAS_PSEUDO_HDR
);
4548 /* first 4 Tx modulation queues point to consecutive Tx channels */
4549 adap
->params
.tp
.tx_modq_map
= 0xE4;
4550 t4_write_reg(adap
, A_TP_TX_MOD_QUEUE_REQ_MAP
,
4551 V_TX_MOD_QUEUE_REQ_MAP(adap
->params
.tp
.tx_modq_map
));
4553 /* associate each Tx modulation queue with consecutive Tx channels */
4555 t4_write_indirect(adap
, TP_PIO_ADDR
, TP_PIO_DATA
,
4556 &v
, 1, A_TP_TX_SCHED_HDR
);
4557 t4_write_indirect(adap
, TP_PIO_ADDR
, TP_PIO_DATA
,
4558 &v
, 1, A_TP_TX_SCHED_FIFO
);
4559 t4_write_indirect(adap
, TP_PIO_ADDR
, TP_PIO_DATA
,
4560 &v
, 1, A_TP_TX_SCHED_PCMD
);
4562 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4563 if (is_offload(adap
)) {
4564 t4_write_reg(adap
, A_TP_TX_MOD_QUEUE_WEIGHT0
,
4565 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT
) |
4566 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT
) |
4567 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT
) |
4568 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT
));
4569 t4_write_reg(adap
, A_TP_TX_MOD_CHANNEL_WEIGHT
,
4570 V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT
) |
4571 V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT
) |
4572 V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT
) |
4573 V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT
));
4576 /* get basic stuff going */
4577 return t4_early_init(adap
, adap
->fn
);
4581 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
4583 #define MAX_ATIDS 8192U
4586 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
4588 * If the firmware we're dealing with has Configuration File support, then
4589 * we use that to perform all configuration
4593 * Tweak configuration based on module parameters, etc. Most of these have
4594 * defaults assigned to them by Firmware Configuration Files (if we're using
4595 * them) but need to be explicitly set if we're using hard-coded
4596 * initialization. But even in the case of using Firmware Configuration
4597 * Files, we'd like to expose the ability to change these via module
4598 * parameters so these are essentially common tweaks/settings for
4599 * Configuration Files and hard-coded initialization ...
4601 static int adap_init0_tweaks(struct adapter
*adapter
)
4604 * Fix up various Host-Dependent Parameters like Page Size, Cache
4605 * Line Size, etc. The firmware default is for a 4KB Page Size and
4606 * 64B Cache Line Size ...
4608 t4_fixup_host_params(adapter
, PAGE_SIZE
, L1_CACHE_BYTES
);
4611 * Process module parameters which affect early initialization.
4613 if (rx_dma_offset
!= 2 && rx_dma_offset
!= 0) {
4614 dev_err(&adapter
->pdev
->dev
,
4615 "Ignoring illegal rx_dma_offset=%d, using 2\n",
4619 t4_set_reg_field(adapter
, SGE_CONTROL
,
4621 PKTSHIFT(rx_dma_offset
));
4624 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4625 * adds the pseudo header itself.
4627 t4_tp_wr_bits_indirect(adapter
, TP_INGRESS_CONFIG
,
4628 CSUM_HAS_PSEUDO_HDR
, 0);
4634 * Attempt to initialize the adapter via a Firmware Configuration File.
4636 static int adap_init0_config(struct adapter
*adapter
, int reset
)
4638 struct fw_caps_config_cmd caps_cmd
;
4639 const struct firmware
*cf
;
4640 unsigned long mtype
= 0, maddr
= 0;
4641 u32 finiver
, finicsum
, cfcsum
;
4643 int config_issued
= 0;
4644 char *fw_config_file
, fw_config_file_path
[256];
4645 char *config_name
= NULL
;
4648 * Reset device if necessary.
4651 ret
= t4_fw_reset(adapter
, adapter
->mbox
,
4652 PIORSTMODE
| PIORST
);
4658 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4659 * then use that. Otherwise, use the configuration file stored
4660 * in the adapter flash ...
4662 switch (CHELSIO_CHIP_VERSION(adapter
->params
.chip
)) {
4664 fw_config_file
= FW4_CFNAME
;
4667 fw_config_file
= FW5_CFNAME
;
4670 dev_err(adapter
->pdev_dev
, "Device %d is not supported\n",
4671 adapter
->pdev
->device
);
4676 ret
= request_firmware(&cf
, fw_config_file
, adapter
->pdev_dev
);
4678 config_name
= "On FLASH";
4679 mtype
= FW_MEMTYPE_CF_FLASH
;
4680 maddr
= t4_flash_cfg_addr(adapter
);
4682 u32 params
[7], val
[7];
4684 sprintf(fw_config_file_path
,
4685 "/lib/firmware/%s", fw_config_file
);
4686 config_name
= fw_config_file_path
;
4688 if (cf
->size
>= FLASH_CFG_MAX_SIZE
)
4691 params
[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV
) |
4692 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF
));
4693 ret
= t4_query_params(adapter
, adapter
->mbox
,
4694 adapter
->fn
, 0, 1, params
, val
);
4697 * For t4_memory_write() below addresses and
4698 * sizes have to be in terms of multiples of 4
4699 * bytes. So, if the Configuration File isn't
4700 * a multiple of 4 bytes in length we'll have
4701 * to write that out separately since we can't
4702 * guarantee that the bytes following the
4703 * residual byte in the buffer returned by
4704 * request_firmware() are zeroed out ...
4706 size_t resid
= cf
->size
& 0x3;
4707 size_t size
= cf
->size
& ~0x3;
4708 __be32
*data
= (__be32
*)cf
->data
;
4710 mtype
= FW_PARAMS_PARAM_Y_GET(val
[0]);
4711 maddr
= FW_PARAMS_PARAM_Z_GET(val
[0]) << 16;
4713 ret
= t4_memory_write(adapter
, mtype
, maddr
,
4715 if (ret
== 0 && resid
!= 0) {
4722 last
.word
= data
[size
>> 2];
4723 for (i
= resid
; i
< 4; i
++)
4725 ret
= t4_memory_write(adapter
, mtype
,
4732 release_firmware(cf
);
4738 * Issue a Capability Configuration command to the firmware to get it
4739 * to parse the Configuration File. We don't use t4_fw_config_file()
4740 * because we want the ability to modify various features after we've
4741 * processed the configuration file ...
4743 memset(&caps_cmd
, 0, sizeof(caps_cmd
));
4744 caps_cmd
.op_to_write
=
4745 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
4748 caps_cmd
.cfvalid_to_len16
=
4749 htonl(FW_CAPS_CONFIG_CMD_CFVALID
|
4750 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype
) |
4751 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr
>> 16) |
4752 FW_LEN16(caps_cmd
));
4753 ret
= t4_wr_mbox(adapter
, adapter
->mbox
, &caps_cmd
, sizeof(caps_cmd
),
4756 /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4757 * Configuration File in FLASH), our last gasp effort is to use the
4758 * Firmware Configuration File which is embedded in the firmware. A
4759 * very few early versions of the firmware didn't have one embedded
4760 * but we can ignore those.
4762 if (ret
== -ENOENT
) {
4763 memset(&caps_cmd
, 0, sizeof(caps_cmd
));
4764 caps_cmd
.op_to_write
=
4765 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
4768 caps_cmd
.cfvalid_to_len16
= htonl(FW_LEN16(caps_cmd
));
4769 ret
= t4_wr_mbox(adapter
, adapter
->mbox
, &caps_cmd
,
4770 sizeof(caps_cmd
), &caps_cmd
);
4771 config_name
= "Firmware Default";
4778 finiver
= ntohl(caps_cmd
.finiver
);
4779 finicsum
= ntohl(caps_cmd
.finicsum
);
4780 cfcsum
= ntohl(caps_cmd
.cfcsum
);
4781 if (finicsum
!= cfcsum
)
4782 dev_warn(adapter
->pdev_dev
, "Configuration File checksum "\
4783 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
4787 * And now tell the firmware to use the configuration we just loaded.
4789 caps_cmd
.op_to_write
=
4790 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
4793 caps_cmd
.cfvalid_to_len16
= htonl(FW_LEN16(caps_cmd
));
4794 ret
= t4_wr_mbox(adapter
, adapter
->mbox
, &caps_cmd
, sizeof(caps_cmd
),
4800 * Tweak configuration based on system architecture, module
4803 ret
= adap_init0_tweaks(adapter
);
4808 * And finally tell the firmware to initialize itself using the
4809 * parameters from the Configuration File.
4811 ret
= t4_fw_initialize(adapter
, adapter
->mbox
);
4816 * Return successfully and note that we're operating with parameters
4817 * not supplied by the driver, rather than from hard-wired
4818 * initialization constants burried in the driver.
4820 adapter
->flags
|= USING_SOFT_PARAMS
;
4821 dev_info(adapter
->pdev_dev
, "Successfully configured using Firmware "\
4822 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4823 config_name
, finiver
, cfcsum
);
4827 * Something bad happened. Return the error ... (If the "error"
4828 * is that there's no Configuration File on the adapter we don't
4829 * want to issue a warning since this is fairly common.)
4832 if (config_issued
&& ret
!= -ENOENT
)
4833 dev_warn(adapter
->pdev_dev
, "\"%s\" configuration file error %d\n",
4839 * Attempt to initialize the adapter via hard-coded, driver supplied
4842 static int adap_init0_no_config(struct adapter
*adapter
, int reset
)
4844 struct sge
*s
= &adapter
->sge
;
4845 struct fw_caps_config_cmd caps_cmd
;
4850 * Reset device if necessary
4853 ret
= t4_fw_reset(adapter
, adapter
->mbox
,
4854 PIORSTMODE
| PIORST
);
4860 * Get device capabilities and select which we'll be using.
4862 memset(&caps_cmd
, 0, sizeof(caps_cmd
));
4863 caps_cmd
.op_to_write
= htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
4864 FW_CMD_REQUEST
| FW_CMD_READ
);
4865 caps_cmd
.cfvalid_to_len16
= htonl(FW_LEN16(caps_cmd
));
4866 ret
= t4_wr_mbox(adapter
, adapter
->mbox
, &caps_cmd
, sizeof(caps_cmd
),
4871 if (caps_cmd
.niccaps
& htons(FW_CAPS_CONFIG_NIC_VM
)) {
4873 caps_cmd
.niccaps
^= htons(FW_CAPS_CONFIG_NIC_VM
);
4875 caps_cmd
.niccaps
= htons(FW_CAPS_CONFIG_NIC_VM
);
4876 } else if (vf_acls
) {
4877 dev_err(adapter
->pdev_dev
, "virtualization ACLs not supported");
4880 caps_cmd
.op_to_write
= htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD
) |
4881 FW_CMD_REQUEST
| FW_CMD_WRITE
);
4882 ret
= t4_wr_mbox(adapter
, adapter
->mbox
, &caps_cmd
, sizeof(caps_cmd
),
4888 * Tweak configuration based on system architecture, module
4891 ret
= adap_init0_tweaks(adapter
);
4896 * Select RSS Global Mode we want to use. We use "Basic Virtual"
4897 * mode which maps each Virtual Interface to its own section of
4898 * the RSS Table and we turn on all map and hash enables ...
4900 adapter
->flags
|= RSS_TNLALLLOOKUP
;
4901 ret
= t4_config_glbl_rss(adapter
, adapter
->mbox
,
4902 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL
,
4903 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN
|
4904 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ
|
4905 ((adapter
->flags
& RSS_TNLALLLOOKUP
) ?
4906 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP
: 0));
4911 * Set up our own fundamental resource provisioning ...
4913 ret
= t4_cfg_pfvf(adapter
, adapter
->mbox
, adapter
->fn
, 0,
4914 PFRES_NEQ
, PFRES_NETHCTRL
,
4915 PFRES_NIQFLINT
, PFRES_NIQ
,
4916 PFRES_TC
, PFRES_NVI
,
4917 FW_PFVF_CMD_CMASK_MASK
,
4918 pfvfres_pmask(adapter
, adapter
->fn
, 0),
4920 PFRES_R_CAPS
, PFRES_WX_CAPS
);
4925 * Perform low level SGE initialization. We need to do this before we
4926 * send the firmware the INITIALIZE command because that will cause
4927 * any other PF Drivers which are waiting for the Master
4928 * Initialization to proceed forward.
4930 for (i
= 0; i
< SGE_NTIMERS
- 1; i
++)
4931 s
->timer_val
[i
] = min(intr_holdoff
[i
], MAX_SGE_TIMERVAL
);
4932 s
->timer_val
[SGE_NTIMERS
- 1] = MAX_SGE_TIMERVAL
;
4933 s
->counter_val
[0] = 1;
4934 for (i
= 1; i
< SGE_NCOUNTERS
; i
++)
4935 s
->counter_val
[i
] = min(intr_cnt
[i
- 1],
4936 THRESHOLD_0_GET(THRESHOLD_0_MASK
));
4937 t4_sge_init(adapter
);

#ifdef CONFIG_PCI_IOV
	/*
	 * Provision resource limits for Virtual Functions.  We currently
	 * grant them all the same static resource limits except for the Port
	 * Access Rights Mask which we're assigning based on the PF.  All of
	 * the static provisioning stuff for both the PF and VF really needs
	 * to be managed in a persistent manner for each device which the
	 * firmware controls.
	 */
	{
		int pf, vf;

		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
			if (num_vf[pf] <= 0)
				continue;

			/* VF numbering starts at 1! */
			for (vf = 1; vf <= num_vf[pf]; vf++) {
				ret = t4_cfg_pfvf(adapter, adapter->mbox,
						  pf, vf,
						  VFRES_NEQ, VFRES_NETHCTRL,
						  VFRES_NIQFLINT, VFRES_NIQ,
						  VFRES_TC, VFRES_NVI,
						  FW_PFVF_CMD_CMASK_MASK,
						  pfvfres_pmask(adapter, pf, vf),
						  VFRES_NEXACTF,
						  VFRES_R_CAPS, VFRES_WX_CAPS);
				if (ret < 0)
					dev_warn(adapter->pdev_dev,
						 "failed to "
						 "provision pf/vf=%d/%d; "
						 "err=%d\n", pf, vf, ret);
			}
		}
	}
#endif
	/*
	 * Set up the default filter mode.  Later we'll want to implement this
	 * via a firmware command, etc. ...  This needs to be done before the
	 * firmware initialization command ...  If the selected set of fields
	 * isn't equal to the default value, we'll need to make sure that the
	 * field selections will fit in the 36-bit budget.
	 */
	if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
		int j, bits = 0;

		for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
			switch (tp_vlan_pri_map & (1 << j)) {
			case 0:
				/* compressed filter field not enabled */
				break;
			case ETHERTYPE_MASK:
				bits += 16;
				break;
			case MPSHITTYPE_MASK:
				bits += 3;
				break;
			case FRAGMENTATION_MASK:
				bits += 1;
				break;
			/*
			 * The remaining compressed filter fields each add
			 * their respective widths here.
			 */
			}

		if (bits > 36) {
			dev_err(adapter->pdev_dev,
				"tp_vlan_pri_map=%#x needs %d bits > 36;"
				" using %#x\n", tp_vlan_pri_map, bits,
				TP_VLAN_PRI_MAP_DEFAULT);
			tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
		}
	}
	v = tp_vlan_pri_map;
	t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, TP_VLAN_PRI_MAP);
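
	/*
	 * Illustrative field-width arithmetic (the widths quoted here are an
	 * assumption used only for this example): a map selecting ETHERTYPE
	 * (16 bits), MACMATCH (9), MPSHITTYPE (3), PORT (3) and FRAGMENTATION
	 * (1) consumes 32 of the 36 available bits and is accepted, whereas
	 * adding a 17-bit field such as VLAN on top of that would exceed the
	 * budget and the code above would fall back to
	 * TP_VLAN_PRI_MAP_DEFAULT.
	 */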
	/*
	 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in
	 * order to support any of the compressed filter fields above.  Newer
	 * versions of the firmware do this automatically but it doesn't hurt
	 * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
	 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
	 * since the firmware automatically turns this on and off when we have
	 * a non-zero number of filters active (since it does have a
	 * performance impact).
	 */
	if (tp_vlan_pri_map)
		t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
				 FIVETUPLELOOKUP_MASK,
				 FIVETUPLELOOKUP_MASK);

	/*
	 * Tweak some settings.
	 */
	t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
		     RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
		     PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
		     KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));

	/*
	 * Get basic stuff going by issuing the Firmware Initialize command.
	 * Note that this _must_ be after all PFVF commands ...
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully!
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using built-in "
		 "driver parameters\n");
	return 0;

	/*
	 * Something bad happened.  Return the error ...
	 */
bye:
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/*
	 * Contact FW, advertising Master capability (and potentially forcing
	 * ourselves as the Master PF if our module parameter force_init is
	 * set).
	 */
	ret = t4_fw_hello(adap, adap->mbox, adap->fn,
			  force_init ? MASTER_MUST : MASTER_MAY,
			  &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;
	if (force_init && state == DEV_STATE_INIT)
		state = DEV_STATE_UNINIT;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_fw_version(adap, &adap->params.fw_vers);
	t4_get_tp_version(adap, &adap->params.tp_vers);
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against.
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = t4_alloc_mem(sizeof(*card_fw));

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		t4_free_mem(card_fw);

		if (ret < 0)
			goto bye;
	}
	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;
	/*
	 * If the firmware is initialized already (and we're not forcing a
	 * master initialization), note that we're living with existing
	 * adapter parameters.  Otherwise, it's time to try initializing the
	 * adapter ...
	 */
	if (state == DEV_STATE_INIT) {
		dev_info(adap->pdev_dev, "Coming up as %s: "
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
		adap->flags |= USING_SOFT_PARAMS;
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "
			 "Initializing adapter\n");

		/*
		 * If the firmware doesn't support Configuration
		 * Files warn user and exit,
		 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */
				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
						 "No Configuration File present "
						 "on adapter.  Using hard-wired "
						 "configuration parameters.\n");
					ret = adap_init0_no_config(adap, reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}

	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;
	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
	FW_PARAMS_PARAM_Y(0) | \
	FW_PARAMS_PARAM_Z(0)
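
	/*
	 * For illustration: FW_PARAM_PFVF(FILTER_START) expands to a single
	 * 32-bit parameter word whose mnemonic field selects the PF/VF
	 * parameter space, whose X field selects
	 * FW_PARAMS_PARAM_PFVF_FILTER_START and whose Y/Z indices are zero.
	 * Each such word is placed in one slot of params[] and handed to
	 * t4_query_params()/t4_set_params(); the firmware's reply lands in
	 * the matching val[] slot.  (This note is descriptive commentary,
	 * not part of the original firmware interface documentation.)
	 */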
	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region.  Divide the available filter
		 * region into two parts.  Regular filters get 1/3rd and
		 * server filters get the remaining 2/3rd.  This is only
		 * enabled if the workaround path is enabled:
		 * 1. For regular filters.
		 * 2. Server filters: these are special filters which are used
		 *    to redirect SYN packets to the offload queue.
		 */
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
					DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
					adap->tids.ftid_base;
		}
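
		/*
		 * Worked example (the filter count is an assumption chosen
		 * purely for illustration): with ftid_base = 0 and
		 * nftids = 496, DIV_ROUND_UP(496, 3) = 166, so sftid_base
		 * becomes 166, nsftids becomes 496 - 166 = 330 and nftids is
		 * trimmed to 166 - 0 = 166, i.e. roughly a 1/3 : 2/3 split
		 * between regular and server filters.
		 */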
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
	/*
	 * These are finalized by FW initialization, load their values now.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	t4_init_tp_params(adap);
	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with EIO
	 * FW does not operate within its spec or something catastrophic
	 * happened to HW/FW, stop issuing commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	spin_unlock(&adap->stats_lock);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev))
			cxgb_set_rxmode(dev);
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
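
/*
 * Usage note, for illustration: the call init_rspq(&r->rspq, 0, 0, 1024, 64)
 * made in cfg_queues() below requests hold-off timer index 0, enables the
 * packet-count threshold with counter index 0, and sizes the response queue
 * at 1024 entries of 64 bytes each; passing a pkt_cnt_idx that is not below
 * SGE_NCOUNTERS would leave the count threshold disabled.
 */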
/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();
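
	/*
	 * Worked example (port mix and queue-set limit are assumptions chosen
	 * purely for illustration): assuming MAX_ETH_QSETS is 32, with two
	 * 10G ports and two 1G ports the 1G ports reserve one queue set each,
	 * leaving (32 - 2) / 2 = 15 queue sets per 10G port, which is then
	 * clamped to whatever netif_get_num_default_rss_queues() reports
	 * (typically the smaller of 8 and the number of online CPUs).
	 */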
	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;	/* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}
	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}
/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}
/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
= 0; i
< want
; ++i
)
5777 adap
->msix_info
[i
].vec
= entries
[i
].vector
;
5779 dev_info(adap
->pdev_dev
,
5780 "only %d MSI-X vectors left, not using MSI-X\n", err
);
static int init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}
static void print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id,
		    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int func, i, err, s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar0;

	if (!is_t4(adapter->params.chip)) {
		s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
		qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
			      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;

		/* Each segment size is 128B. Write coalescing is enabled only
		 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
		 * queue is less than the number of segments that can be
		 * accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_unmap_bar0;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_unmap_bar0;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}
	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;
	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_unmap_bar0:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		if (adapter->tids.ftid_tab) {
			struct filter_entry *f = &adapter->tids.ftid_tab[0];
			for (i = 0; i < (adapter->tids.nftids +
					 adapter->tids.nsftids); i++, f++)
				if (f->valid)
					clear_filter(adapter, f);
		}

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
		pci_disable_pcie_error_reporting(pdev);
		if ((adapter->flags & DEV_ENABLED)) {
			pci_disable_device(pdev);
			adapter->flags &= ~DEV_ENABLED;
		}
		pci_release_regions(pdev);
		kfree(adapter);
	} else
		pci_release_regions(pdev);
}
static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0) {
		debugfs_remove(cxgb4_debugfs_root);
		destroy_workqueue(workq);
		return ret;
	}

	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);

	return 0;
}

static void __exit cxgb4_cleanup_module(void)
{
	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);	/* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);