/*************************************************************************
 * myri10ge.c: Myricom Myri-10G Ethernet driver.
 *
 * Copyright (C) 2005 - 2009 Myricom, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Myricom, Inc. nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * If the eeprom on your board is not recent enough, you will need to get a
 * newer firmware image at:
 *   http://www.myri.com/scs/download-Myri10GE.html
 *
 * Contact Information:
 *   <help@myri.com>
 *   Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
 *************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/inet_lro.h>
#include <linux/dca.h>
#include <linux/ip.h>
#include <linux/inet.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/crc32.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
#define MYRI10GE_VERSION_STR "1.5.2-1.459"

MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
MODULE_VERSION(MYRI10GE_VERSION_STR);
MODULE_LICENSE("Dual BSD/GPL");

#define MYRI10GE_MAX_ETHER_MTU 9014

#define MYRI10GE_ETH_STOPPED 0
#define MYRI10GE_ETH_STOPPING 1
#define MYRI10GE_ETH_STARTING 2
#define MYRI10GE_ETH_RUNNING 3
#define MYRI10GE_ETH_OPEN_FAILED 4

#define MYRI10GE_EEPROM_STRINGS_SIZE 256
#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
#define MYRI10GE_MAX_LRO_DESCRIPTORS 8
#define MYRI10GE_LRO_MAX_PKTS 64

#define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff

#define MYRI10GE_ALLOC_ORDER 0
#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)

#define MYRI10GE_MAX_SLICES 32
struct myri10ge_rx_buffer_state {
	struct page *page;
	int page_offset;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_tx_buffer_state {
	struct sk_buff *skb;
	int last;
	DEFINE_DMA_UNMAP_ADDR(bus);
	DEFINE_DMA_UNMAP_LEN(len);
};
struct myri10ge_cmd {
	u32 data0;
	u32 data1;
	u32 data2;
};
struct myri10ge_rx_buf {
	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
	struct myri10ge_rx_buffer_state *info;
	struct page *page;
	dma_addr_t bus;
	int page_offset;
	int cnt;
	int fill_cnt;
	int mask;		/* number of rx slots -1 */
	int watchdog_needed;
};
struct myri10ge_tx_buf {
	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
	__be32 __iomem *send_go;	/* "go" doorbell ptr */
	__be32 __iomem *send_stop;	/* "stop" doorbell ptr */
	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
	char *req_bytes;
	struct myri10ge_tx_buffer_state *info;
	int mask;		/* number of transmit slots -1 */
	int req ____cacheline_aligned;	/* transmit slots submitted */
	int pkt_start;		/* packets started */
	int stop_queue;
	int linearized;
	int done ____cacheline_aligned;	/* transmit slots completed */
	int pkt_done;		/* packets completed */
	int wake_queue;
	int queue_active;
};
struct myri10ge_rx_done {
	struct mcp_slot *entry;
	dma_addr_t bus;
	int cnt;
	int idx;
	struct net_lro_mgr lro_mgr;
	struct net_lro_desc lro_desc[MYRI10GE_MAX_LRO_DESCRIPTORS];
};
struct myri10ge_slice_netstats {
	unsigned long rx_packets;
	unsigned long tx_packets;
	unsigned long rx_bytes;
	unsigned long tx_bytes;
	unsigned long rx_dropped;
	unsigned long tx_dropped;
};
struct myri10ge_slice_state {
	struct myri10ge_tx_buf tx;	/* transmit ring */
	struct myri10ge_rx_buf rx_small;
	struct myri10ge_rx_buf rx_big;
	struct myri10ge_rx_done rx_done;
	struct net_device *dev;
	struct napi_struct napi;
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_netstats stats;
	__be32 __iomem *irq_claim;
	struct mcp_irq_data *fw_stats;
	dma_addr_t fw_stats_bus;
	int watchdog_tx_done;
	int watchdog_tx_req;
	int watchdog_rx_done;
#ifdef CONFIG_MYRI10GE_DCA
	int cached_dca_tag;
	int cpu;
	__be32 __iomem *dca_tag;
#endif
	char irq_desc[32];
};
struct myri10ge_priv {
	struct myri10ge_slice_state *ss;
	int tx_boundary;	/* boundary transmits cannot cross */
	int num_slices;
	int running;		/* running? */
	int csum_flag;		/* rx_csums? */
	int small_bytes;	/* size of small rx packets */
	int big_bytes;		/* size of big rx packets */
	int max_intr_slots;
	struct net_device *dev;
	spinlock_t stats_lock;
	u8 __iomem *sram;
	int sram_size;
	unsigned long board_span;
	unsigned long iomem_base;
	__be32 __iomem *irq_deassert;
	char *mac_addr_string;
	struct mcp_cmd_response *cmd;
	dma_addr_t cmd_bus;
	struct pci_dev *pdev;
	int msi_enabled;
	int msix_enabled;
	struct msix_entry *msix_vectors;
#ifdef CONFIG_MYRI10GE_DCA
	int dca_enabled;
	int relaxed_order;
#endif
	u32 link_state;
	unsigned int rdma_tags_available;
	int intr_coal_delay;
	__be32 __iomem *intr_coal_delay_ptr;
	int wc_enabled;
	int down_cnt;
	wait_queue_head_t down_wq;
	struct work_struct watchdog_work;
	struct timer_list watchdog_timer;
	int watchdog_resets;
	int pause;
	bool fw_name_allocated;
	char *fw_name;
	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
	char *product_code_string;
	char fw_version[128];
	int fw_ver_major;
	int fw_ver_minor;
	int fw_ver_tiny;
	int adopted_rx_filter_bug;
	u8 mac_addr[6];		/* eeprom mac address */
	unsigned long serial_number;
	int vendor_specific_offset;
	int fw_multicast_support;
	unsigned long features;
	int max_tso6;
	u32 read_dma;
	u32 write_dma;
	u32 read_write_dma;
	u32 link_changes;
	u32 msg_enable;
	unsigned int board_number;
};
static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
/* Careful: must be accessed under kparam_block_sysfs_write */
static char *myri10ge_fw_name = NULL;
module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name");

#define MYRI10GE_MAX_BOARDS 8
static char *myri10ge_fw_names[MYRI10GE_MAX_BOARDS] =
    {[0 ... (MYRI10GE_MAX_BOARDS - 1)] = NULL };
module_param_array_named(myri10ge_fw_names, myri10ge_fw_names, charp, NULL,
			 0444);
MODULE_PARM_DESC(myri10ge_fw_names, "Firmware image names per board");
static int myri10ge_ecrc_enable = 1;
module_param(myri10ge_ecrc_enable, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E");

static int myri10ge_small_bytes = -1;	/* -1 == auto */
module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets");

static int myri10ge_msi = 1;	/* enable msi by default */
module_param(myri10ge_msi, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts");

static int myri10ge_intr_coal_delay = 75;
module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay");

static int myri10ge_flow_control = 1;
module_param(myri10ge_flow_control, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter");

static int myri10ge_deassert_wait = 1;
module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_deassert_wait,
		 "Wait when deasserting legacy interrupts");

static int myri10ge_force_firmware = 0;
module_param(myri10ge_force_firmware, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_force_firmware,
		 "Force firmware to assume aligned completions");

static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
module_param(myri10ge_initial_mtu, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU");

static int myri10ge_napi_weight = 64;
module_param(myri10ge_napi_weight, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight");

static int myri10ge_watchdog_timeout = 1;
module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout");

static int myri10ge_max_irq_loops = 1048576;
module_param(myri10ge_max_irq_loops, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_irq_loops,
		 "Set stuck legacy IRQ detection threshold");

#define MYRI10GE_MSG_DEFAULT NETIF_MSG_LINK

static int myri10ge_debug = -1;	/* defaults above */
module_param(myri10ge_debug, int, 0);
MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");

static int myri10ge_lro_max_pkts = MYRI10GE_LRO_MAX_PKTS;
module_param(myri10ge_lro_max_pkts, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_lro_max_pkts,
		 "Number of LRO packets to be aggregated");

static int myri10ge_fill_thresh = 256;
module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed");

static int myri10ge_reset_recover = 1;

static int myri10ge_max_slices = 1;
module_param(myri10ge_max_slices, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues");

static int myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT;
module_param(myri10ge_rss_hash, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_rss_hash, "Type of RSS hashing to do");

static int myri10ge_dca = 1;
module_param(myri10ge_dca, int, S_IRUGO);
MODULE_PARM_DESC(myri10ge_dca, "Enable DCA if possible");
#define MYRI10GE_FW_OFFSET 1024*1024
#define MYRI10GE_HIGHPART_TO_U32(X) \
(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))

#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
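/*
 * Note: __iowrite64_copy() moves size/8 64-bit words, so every
 * myri10ge_pio_copy() caller must pass a size that is a multiple of
 * 8 bytes; this is also why the command and handoff buffers used with
 * it below are declared 8-byte aligned.
 */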
static void myri10ge_set_multicast_list(struct net_device *dev);
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev);

static inline void put_be32(__be32 val, __be32 __iomem * p)
{
	__raw_writel((__force __u32) val, (__force void __iomem *)p);
}

static struct net_device_stats *myri10ge_get_stats(struct net_device *dev);
static void set_fw_name(struct myri10ge_priv *mgp, char *name, bool allocated)
{
	if (mgp->fw_name_allocated)
		kfree(mgp->fw_name);
	mgp->fw_name = name;
	mgp->fw_name_allocated = allocated;
}
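/*
 * Command mailbox protocol, as implemented below: build an 8-byte
 * aligned struct mcp_cmd holding the command word, its arguments, and
 * the bus address of the response block (mgp->cmd), PIO-copy it into
 * NIC SRAM at MXGEFW_ETH_CMD, then poll response->result in host
 * memory until the firmware DMAs back something other than
 * MYRI10GE_NO_RESPONSE_RESULT (or the timeout expires).
 */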
static int
myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
		  struct myri10ge_cmd *data, int atomic)
{
	struct mcp_cmd *buf;
	char buf_bytes[sizeof(*buf) + 8];
	struct mcp_cmd_response *response = mgp->cmd;
	char __iomem *cmd_addr = mgp->sram + MXGEFW_ETH_CMD;
	u32 dma_low, dma_high, result, value;
	int sleep_total = 0;

	/* ensure buf is aligned to 8 bytes */
	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);

	buf->data0 = htonl(data->data0);
	buf->data1 = htonl(data->data1);
	buf->data2 = htonl(data->data2);
	buf->cmd = htonl(cmd);
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf->response_addr.low = htonl(dma_low);
	buf->response_addr.high = htonl(dma_high);
	response->result = htonl(MYRI10GE_NO_RESPONSE_RESULT);
	mb();
	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));

	/* wait up to 15ms. Longest command is the DMA benchmark,
	 * which is capped at 5ms, but runs from a timeout handler
	 * that runs every 7.8ms. So a 15ms timeout leaves us with
	 * a 2.2ms margin
	 */
	if (atomic) {
		/* if atomic is set, do not sleep,
		 * and try to get the completion quickly
		 * (1ms will be enough for those commands) */
		for (sleep_total = 0;
		     sleep_total < 1000 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total += 10) {
			udelay(10);
			mb();
		}
	} else {
		/* use msleep for most command */
		for (sleep_total = 0;
		     sleep_total < 15 &&
		     response->result == htonl(MYRI10GE_NO_RESPONSE_RESULT);
		     sleep_total++)
			msleep(1);
	}

	result = ntohl(response->result);
	value = ntohl(response->data);
	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
		if (result == 0) {
			data->data0 = value;
			return 0;
		} else if (result == MXGEFW_CMD_UNKNOWN) {
			return -ENOSYS;
		} else if (result == MXGEFW_CMD_ERROR_UNALIGNED) {
			return -E2BIG;
		} else if (result == MXGEFW_CMD_ERROR_RANGE &&
			   cmd == MXGEFW_CMD_ENABLE_RSS_QUEUES &&
			   (data->
			    data1 & MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES) !=
			   0) {
			return -ERANGE;
		} else {
			dev_err(&mgp->pdev->dev,
				"command %d failed, result = %d\n",
				cmd, result);
			return -ENXIO;
		}
	}

	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
		cmd, result);
	return -EAGAIN;
}
/*
 * The eeprom strings on the lanaiX have the format
 * SN=x\0
 * MAC=x:x:x:x:x:x\0
 * PC=text\0
 * PT:ddd mmm xx xx:xx:xx xx\0
 * PV:ddd mmm xx xx:xx:xx xx\0
 */
static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
{
	char *ptr, *limit;
	int i;

	ptr = mgp->eeprom_strings;
	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;

	while (*ptr != '\0' && ptr < limit) {
		if (memcmp(ptr, "MAC=", 4) == 0) {
			ptr += 4;
			mgp->mac_addr_string = ptr;
			for (i = 0; i < 6; i++) {
				if ((ptr + 2) > limit)
					goto abort;
				mgp->mac_addr[i] =
				    simple_strtoul(ptr, &ptr, 16);
				ptr += 1;
			}
		}
		if (memcmp(ptr, "PC=", 3) == 0) {
			ptr += 3;
			mgp->product_code_string = ptr;
		}
		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
			ptr += 3;
			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
		}
		while (ptr < limit && *ptr++) ;
	}

	return 0;

abort:
	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
	return -ENXIO;
}
/*
 * Enable or disable periodic RDMAs from the host to make certain
 * chipsets resend dropped PCIe messages
 */

static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high;
	int i;

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a rdma command to the PCIe engine, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */
	buf[3] = htonl(dma_high);	/* dummy addr MSW */
	buf[4] = htonl(dma_low);	/* dummy addr LSW */
	buf[5] = htonl(enable);	/* enable? */

	submit = mgp->sram + MXGEFW_BOOT_DUMMY_RDMA;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
		msleep(1);
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
			(enable ? "enable" : "disable"));
}
static int
myri10ge_validate_firmware(struct myri10ge_priv *mgp,
			   struct mcp_gen_header *hdr)
{
	struct device *dev = &mgp->pdev->dev;

	/* check firmware type */
	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
		return -EINVAL;
	}

	/* save firmware version for ethtool */
	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));

	sscanf(mgp->fw_version, "%d.%d.%d", &mgp->fw_ver_major,
	       &mgp->fw_ver_minor, &mgp->fw_ver_tiny);

	if (!(mgp->fw_ver_major == MXGEFW_VERSION_MAJOR &&
	      mgp->fw_ver_minor == MXGEFW_VERSION_MINOR)) {
		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
			MXGEFW_VERSION_MINOR);
		return -EINVAL;
	}
	return 0;
}
static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
{
	unsigned crc, reread_crc;
	const struct firmware *fw;
	struct device *dev = &mgp->pdev->dev;
	unsigned char *fw_readback;
	struct mcp_gen_header *hdr;
	size_t hdr_offset;
	int status;
	unsigned i;

	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
			mgp->fw_name);
		status = -EINVAL;
		goto abort_with_nothing;
	}

	/* check size */
	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
		status = -EINVAL;
		goto abort_with_fw;
	}

	/* check id */
	hdr_offset = ntohl(*(__be32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
		dev_err(dev, "Bad firmware file\n");
		status = -EINVAL;
		goto abort_with_fw;
	}
	hdr = (void *)(fw->data + hdr_offset);

	status = myri10ge_validate_firmware(mgp, hdr);
	if (status != 0)
		goto abort_with_fw;

	crc = crc32(~0, fw->data, fw->size);
	for (i = 0; i < fw->size; i += 256) {
		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET + i,
				  fw->data + i,
				  min(256U, (unsigned)(fw->size - i)));
		mb();
	}
	fw_readback = vmalloc(fw->size);
	if (!fw_readback) {
		status = -ENOMEM;
		goto abort_with_fw;
	}
	/* corruption checking is good for parity recovery and buggy chipset */
	memcpy_fromio(fw_readback, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
	reread_crc = crc32(~0, fw_readback, fw->size);
	vfree(fw_readback);
	if (crc != reread_crc) {
		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
			(unsigned)fw->size, reread_crc, crc);
		status = -EIO;
		goto abort_with_fw;
	}
	*size = (u32) fw->size;

abort_with_fw:
	release_firmware(fw);

abort_with_nothing:
	return status;
}
static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
{
	struct mcp_gen_header *hdr;
	struct device *dev = &mgp->pdev->dev;
	const size_t bytes = sizeof(struct mcp_gen_header);
	size_t hdr_offset;
	int status;

	/* find running firmware header */
	hdr_offset = swab32(readl(mgp->sram + MCP_HEADER_PTR_OFFSET));

	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
		dev_err(dev, "Running firmware has bad header offset (%d)\n",
			(int)hdr_offset);
		return -EIO;
	}

	/* copy header of running firmware from SRAM to host memory to
	 * validate firmware */
	hdr = kmalloc(bytes, GFP_KERNEL);
	if (hdr == NULL) {
		dev_err(dev, "could not malloc firmware hdr\n");
		return -ENOMEM;
	}
	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
	status = myri10ge_validate_firmware(mgp, hdr);
	kfree(hdr);

	/* check to see if adopted firmware has bug where adopting
	 * it will cause broadcasts to be filtered unless the NIC
	 * is kept in ALLMULTI mode */
	if (mgp->fw_ver_major == 1 && mgp->fw_ver_minor == 4 &&
	    mgp->fw_ver_tiny >= 4 && mgp->fw_ver_tiny <= 11) {
		mgp->adopted_rx_filter_bug = 1;
		dev_warn(dev, "Adopting fw %d.%d.%d: "
			 "working around rx filter bug\n",
			 mgp->fw_ver_major, mgp->fw_ver_minor,
			 mgp->fw_ver_tiny);
	}
	return status;
}
static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	int status;

	/* probe for IPv6 TSO support */
	mgp->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE,
				   &cmd, 0);
	if (status == 0) {
		mgp->max_tso6 = cmd.data0;
		mgp->features |= NETIF_F_TSO6;
	}

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev,
			"failed MXGEFW_CMD_GET_RX_RING_SIZE\n");
		return -ENXIO;
	}

	mgp->max_intr_slots = 2 * (cmd.data0 / sizeof(struct mcp_dma_addr));

	return 0;
}
static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
{
	char __iomem *submit;
	__be32 buf[16] __attribute__ ((__aligned__(8)));
	u32 dma_low, dma_high, size;
	int status, i;

	size = 0;
	status = myri10ge_load_hotplug_firmware(mgp, &size);
	if (status) {
		if (!adopt)
			return status;
		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");

		/* Do not attempt to adopt firmware if there
		 * was a bad crc */
		if (status == -EIO)
			return status;

		status = myri10ge_adopt_running_firmware(mgp);
		if (status != 0) {
			dev_err(&mgp->pdev->dev,
				"failed to adopt running firmware\n");
			return status;
		}
		dev_info(&mgp->pdev->dev,
			 "Successfully adopted running firmware\n");
		if (mgp->tx_boundary == 4096) {
			dev_warn(&mgp->pdev->dev,
				 "Using firmware currently running on NIC"
				 ".  For optimal\n");
			dev_warn(&mgp->pdev->dev,
				 "performance consider loading optimized "
				 "firmware\n");
			dev_warn(&mgp->pdev->dev, "via hotplug\n");
		}

		set_fw_name(mgp, "adopted", false);
		mgp->tx_boundary = 2048;
		myri10ge_dummy_rdma(mgp, 1);
		status = myri10ge_get_firmware_capabilities(mgp);
		return status;
	}

	/* clear confirmation addr */
	mgp->cmd->data = 0;
	mb();

	/* send a reload command to the bootstrap MCP, and wait for the
	 * response in the confirmation address.  The firmware should
	 * write a -1 there to indicate it is alive and well
	 */
	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);

	buf[0] = htonl(dma_high);	/* confirm addr MSW */
	buf[1] = htonl(dma_low);	/* confirm addr LSW */
	buf[2] = MYRI10GE_NO_CONFIRM_DATA;	/* confirm data */

	/* FIX: All newest firmware should un-protect the bottom of
	 * the sram before handoff. However, the very first interfaces
	 * do not. Therefore the handoff copy must skip the first 8 bytes
	 */
	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
	buf[4] = htonl(size - 8);	/* length of code */
	buf[5] = htonl(8);	/* where to copy to */
	buf[6] = htonl(0);	/* where to jump to */

	submit = mgp->sram + MXGEFW_BOOT_HANDOFF;

	myri10ge_pio_copy(submit, &buf, sizeof(buf));
	mb();
	msleep(1);
	i = 0;
	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 9) {
		msleep(1 << i);
		i++;
	}
	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
		dev_err(&mgp->pdev->dev, "handoff failed\n");
		return -ENXIO;
	}
	myri10ge_dummy_rdma(mgp, 1);
	status = myri10ge_get_firmware_capabilities(mgp);

	return status;
}
static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
{
	struct myri10ge_cmd cmd;
	int status;

	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
		     | (addr[2] << 8) | addr[3]);

	cmd.data1 = ((addr[4] << 8) | (addr[5]));

	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
	return status;
}
static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
{
	struct myri10ge_cmd cmd;
	int ctl;
	int status;

	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);

	if (status) {
		netdev_err(mgp->dev, "Failed to set flow control mode\n");
		return status;
	}
	mgp->pause = pause;
	return 0;
}
static void
myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
{
	struct myri10ge_cmd cmd;
	int ctl;
	int status;

	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
	if (status)
		netdev_err(mgp->dev, "Failed to set promisc mode\n");
}
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
{
	struct myri10ge_cmd cmd;
	struct page *dmatest_page;
	dma_addr_t dmatest_bus;
	char *test = " ";
	int status;
	int len;

	dmatest_page = alloc_page(GFP_KERNEL);
	if (!dmatest_page)
		return -ENOMEM;
	dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE,
				   DMA_BIDIRECTIONAL);

	/* Run a small DMA test.
	 * The magic multipliers to the length tell the firmware
	 * to do DMA read, write, or read+write tests.  The
	 * results are returned in cmd.data0.  The upper 16
	 * bits or the return is the number of transfers completed.
	 * The lower 16 bits is the time in 0.5us ticks that the
	 * transfers took to complete.
	 */

	len = mgp->tx_boundary;

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10000;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read";
		goto abort;
	}
	mgp->read_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x1;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "write";
		goto abort;
	}
	mgp->write_dma = ((cmd.data0 >> 16) * len * 2) / (cmd.data0 & 0xffff);

	cmd.data0 = MYRI10GE_LOWPART_TO_U32(dmatest_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(dmatest_bus);
	cmd.data2 = len * 0x10001;
	status = myri10ge_send_cmd(mgp, test_type, &cmd, 0);
	if (status != 0) {
		test = "read/write";
		goto abort;
	}
	mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
	    (cmd.data0 & 0xffff);

abort:
	pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);
	put_page(dmatest_page);

	if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST)
		dev_warn(&mgp->pdev->dev, "DMA %s benchmark failed: %d\n",
			 test, status);
	return status;
}
*mgp
)
924 struct myri10ge_cmd cmd
;
925 struct myri10ge_slice_state
*ss
;
928 #ifdef CONFIG_MYRI10GE_DCA
929 unsigned long dca_tag_off
;
932 /* try to send a reset command to the card to see if it
934 memset(&cmd
, 0, sizeof(cmd
));
935 status
= myri10ge_send_cmd(mgp
, MXGEFW_CMD_RESET
, &cmd
, 0);
937 dev_err(&mgp
->pdev
->dev
, "failed reset\n");
941 (void)myri10ge_dma_test(mgp
, MXGEFW_DMA_TEST
);
943 * Use non-ndis mcp_slot (eg, 4 bytes total,
944 * no toeplitz hash value returned. Older firmware will
945 * not understand this command, but will use the correct
946 * sized mcp_slot, so we ignore error returns
948 cmd
.data0
= MXGEFW_RSS_MCP_SLOT_TYPE_MIN
;
949 (void)myri10ge_send_cmd(mgp
, MXGEFW_CMD_SET_RSS_MCP_SLOT_TYPE
, &cmd
, 0);
951 /* Now exchange information about interrupts */
953 bytes
= mgp
->max_intr_slots
* sizeof(*mgp
->ss
[0].rx_done
.entry
);
954 cmd
.data0
= (u32
) bytes
;
955 status
= myri10ge_send_cmd(mgp
, MXGEFW_CMD_SET_INTRQ_SIZE
, &cmd
, 0);
958 * Even though we already know how many slices are supported
959 * via myri10ge_probe_slices() MXGEFW_CMD_GET_MAX_RSS_QUEUES
960 * has magic side effects, and must be called after a reset.
961 * It must be called prior to calling any RSS related cmds,
962 * including assigning an interrupt queue for anything but
963 * slice 0. It must also be called *after*
964 * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by
965 * the firmware to compute offsets.
968 if (mgp
->num_slices
> 1) {
970 /* ask the maximum number of slices it supports */
971 status
= myri10ge_send_cmd(mgp
, MXGEFW_CMD_GET_MAX_RSS_QUEUES
,
974 dev_err(&mgp
->pdev
->dev
,
975 "failed to get number of slices\n");
979 * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior
980 * to setting up the interrupt queue DMA
983 cmd
.data0
= mgp
->num_slices
;
984 cmd
.data1
= MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE
;
985 if (mgp
->dev
->real_num_tx_queues
> 1)
986 cmd
.data1
|= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES
;
987 status
= myri10ge_send_cmd(mgp
, MXGEFW_CMD_ENABLE_RSS_QUEUES
,
990 /* Firmware older than 1.4.32 only supports multiple
991 * RX queues, so if we get an error, first retry using a
992 * single TX queue before giving up */
993 if (status
!= 0 && mgp
->dev
->real_num_tx_queues
> 1) {
994 netif_set_real_num_tx_queues(mgp
->dev
, 1);
995 cmd
.data0
= mgp
->num_slices
;
996 cmd
.data1
= MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE
;
997 status
= myri10ge_send_cmd(mgp
,
998 MXGEFW_CMD_ENABLE_RSS_QUEUES
,
1003 dev_err(&mgp
->pdev
->dev
,
1004 "failed to set number of slices\n");
1009 for (i
= 0; i
< mgp
->num_slices
; i
++) {
1011 cmd
.data0
= MYRI10GE_LOWPART_TO_U32(ss
->rx_done
.bus
);
1012 cmd
.data1
= MYRI10GE_HIGHPART_TO_U32(ss
->rx_done
.bus
);
1014 status
|= myri10ge_send_cmd(mgp
, MXGEFW_CMD_SET_INTRQ_DMA
,
1019 myri10ge_send_cmd(mgp
, MXGEFW_CMD_GET_IRQ_ACK_OFFSET
, &cmd
, 0);
1020 for (i
= 0; i
< mgp
->num_slices
; i
++) {
1023 (__iomem __be32
*) (mgp
->sram
+ cmd
.data0
+ 8 * i
);
1025 status
|= myri10ge_send_cmd(mgp
, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET
,
1027 mgp
->irq_deassert
= (__iomem __be32
*) (mgp
->sram
+ cmd
.data0
);
1029 status
|= myri10ge_send_cmd
1030 (mgp
, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET
, &cmd
, 0);
1031 mgp
->intr_coal_delay_ptr
= (__iomem __be32
*) (mgp
->sram
+ cmd
.data0
);
1033 dev_err(&mgp
->pdev
->dev
, "failed set interrupt parameters\n");
1036 put_be32(htonl(mgp
->intr_coal_delay
), mgp
->intr_coal_delay_ptr
);
1038 #ifdef CONFIG_MYRI10GE_DCA
1039 status
= myri10ge_send_cmd(mgp
, MXGEFW_CMD_GET_DCA_OFFSET
, &cmd
, 0);
1040 dca_tag_off
= cmd
.data0
;
1041 for (i
= 0; i
< mgp
->num_slices
; i
++) {
1044 ss
->dca_tag
= (__iomem __be32
*)
1045 (mgp
->sram
+ dca_tag_off
+ 4 * i
);
1050 #endif /* CONFIG_MYRI10GE_DCA */
1052 /* reset mcp/driver shared state back to 0 */
1054 mgp
->link_changes
= 0;
1055 for (i
= 0; i
< mgp
->num_slices
; i
++) {
1058 memset(ss
->rx_done
.entry
, 0, bytes
);
1061 ss
->tx
.pkt_start
= 0;
1062 ss
->tx
.pkt_done
= 0;
1064 ss
->rx_small
.cnt
= 0;
1065 ss
->rx_done
.idx
= 0;
1066 ss
->rx_done
.cnt
= 0;
1067 ss
->tx
.wake_queue
= 0;
1068 ss
->tx
.stop_queue
= 0;
1071 status
= myri10ge_update_mac_address(mgp
, mgp
->dev
->dev_addr
);
1072 myri10ge_change_pause(mgp
, mgp
->pause
);
1073 myri10ge_set_multicast_list(mgp
->dev
);
#ifdef CONFIG_MYRI10GE_DCA
static int myri10ge_toggle_relaxed(struct pci_dev *pdev, int on)
{
	int ret, cap, err;
	u16 ctl;

	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!cap)
		return 0;

	err = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
	ret = (ctl & PCI_EXP_DEVCTL_RELAX_EN) >> 4;
	if (ret != on) {
		ctl &= ~PCI_EXP_DEVCTL_RELAX_EN;
		ctl |= (on << 4);
		pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
	}
	return ret;
}

static void
myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag)
{
	ss->cached_dca_tag = tag;
	put_be32(htonl(tag), ss->dca_tag);
}

static inline void myri10ge_update_dca(struct myri10ge_slice_state *ss)
{
	int cpu = get_cpu();
	int tag;

	if (cpu != ss->cpu) {
		tag = dca3_get_tag(&ss->mgp->pdev->dev, cpu);
		if (ss->cached_dca_tag != tag)
			myri10ge_write_dca(ss, cpu, tag);
		ss->cpu = cpu;
	}
	put_cpu();
}

static void myri10ge_setup_dca(struct myri10ge_priv *mgp)
{
	int err, i;
	struct pci_dev *pdev = mgp->pdev;

	if (mgp->ss[0].dca_tag == NULL || mgp->dca_enabled)
		return;
	if (!myri10ge_dca) {
		dev_err(&pdev->dev, "dca disabled by administrator\n");
		return;
	}
	err = dca_add_requester(&pdev->dev);
	if (err) {
		if (err != -ENODEV)
			dev_err(&pdev->dev,
				"dca_add_requester() failed, err=%d\n", err);
		return;
	}
	mgp->relaxed_order = myri10ge_toggle_relaxed(pdev, 0);
	mgp->dca_enabled = 1;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->ss[i].cpu = -1;
		mgp->ss[i].cached_dca_tag = -1;
		myri10ge_update_dca(&mgp->ss[i]);
	}
}

static void myri10ge_teardown_dca(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int err;

	if (!mgp->dca_enabled)
		return;
	mgp->dca_enabled = 0;
	if (mgp->relaxed_order)
		myri10ge_toggle_relaxed(pdev, 1);
	err = dca_remove_requester(&pdev->dev);
}

static int myri10ge_notify_dca_device(struct device *dev, void *data)
{
	struct myri10ge_priv *mgp;
	unsigned long event;

	mgp = dev_get_drvdata(dev);
	event = *(unsigned long *)data;

	if (event == DCA_PROVIDER_ADD)
		myri10ge_setup_dca(mgp);
	else if (event == DCA_PROVIDER_REMOVE)
		myri10ge_teardown_dca(mgp);
	return 0;
}
#endif				/* CONFIG_MYRI10GE_DCA */
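/*
 * Rx descriptors are handed to the NIC eight at a time.  The helper
 * below first poisons addr_low of the leading descriptor (all ones),
 * copies the block in two 32-byte PIO bursts, and only then rewrites
 * the real addr_low -- presumably so the firmware, which keys on that
 * word, never consumes a partially written group.
 */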
static inline void
myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
		    struct mcp_kreq_ether_recv *src)
{
	__be32 low;

	low = src->addr_low;
	src->addr_low = htonl(DMA_BIT_MASK(32));
	myri10ge_pio_copy(dst, src, 4 * sizeof(*src));
	mb();
	myri10ge_pio_copy(dst + 4, src + 4, 4 * sizeof(*src));
	mb();
	src->addr_low = low;
	put_be32(low, &dst->addr_low);
	mb();
}
static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
{
	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
		skb->csum = hw_csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}
static inline void
myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
		      struct skb_frag_struct *rx_frags, int len, int hlen)
{
	struct skb_frag_struct *skb_frags;

	skb->len = skb->data_len = len;
	skb->truesize = len + sizeof(struct sk_buff);
	/* attach the page(s) */

	skb_frags = skb_shinfo(skb)->frags;
	while (len > 0) {
		memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
		len -= rx_frags->size;
		skb_frags++;
		rx_frags++;
		skb_shinfo(skb)->nr_frags++;
	}

	/* pskb_may_pull is not available in irq context, but
	 * skb_pull() (for ether_pad and eth_type_trans()) requires
	 * the beginning of the packet in skb_headlen(), move it
	 * manually */
	skb_copy_to_linear_data(skb, va, hlen);
	skb_shinfo(skb)->frags[0].page_offset += hlen;
	skb_shinfo(skb)->frags[0].size -= hlen;
	skb->data_len -= hlen;
	skb->tail += hlen;
	skb_pull(skb, MXGEFW_PAD);
}
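/*
 * Receive buffers are carved out of MYRI10GE_ALLOC_SIZE pages: each
 * filled slot takes a get_page() reference and records its sub-page
 * offset, and a fresh page (with a new pci_map_page() mapping) is
 * allocated only when the current page cannot hold another buffer.
 * The matching pci_unmap_page() in myri10ge_unmap_rx_page() fires
 * only for the slot that is the page's last user.
 */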
static void
myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
			int bytes, int watchdog)
{
	struct page *page;
	int idx;
#if MYRI10GE_ALLOC_SIZE > 4096
	int end_offset;
#endif

	if (unlikely(rx->watchdog_needed && !watchdog))
		return;

	/* try to refill entire ring */
	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
		idx = rx->fill_cnt & rx->mask;
		if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
			/* we can use part of previous page */
			get_page(rx->page);
		} else {
			/* we need a new page */
			page =
			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
					MYRI10GE_ALLOC_ORDER);
			if (unlikely(page == NULL)) {
				if (rx->fill_cnt - rx->cnt < 16)
					rx->watchdog_needed = 1;
				return;
			}
			rx->page = page;
			rx->page_offset = 0;
			rx->bus = pci_map_page(mgp->pdev, page, 0,
					       MYRI10GE_ALLOC_SIZE,
					       PCI_DMA_FROMDEVICE);
		}
		rx->info[idx].page = rx->page;
		rx->info[idx].page_offset = rx->page_offset;
		/* note that this is the address of the start of the
		 * page */
		dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
		rx->shadow[idx].addr_low =
		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
		rx->shadow[idx].addr_high =
		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));

		/* start next packet on a cacheline boundary */
		rx->page_offset += SKB_DATA_ALIGN(bytes);

#if MYRI10GE_ALLOC_SIZE > 4096
		/* don't cross a 4KB boundary */
		end_offset = rx->page_offset + bytes - 1;
		if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
			rx->page_offset = end_offset & ~4095;
#endif
		rx->fill_cnt++;

		/* copy 8 descriptors to the firmware at a time */
		if ((idx & 7) == 7) {
			myri10ge_submit_8rx(&rx->lanai[idx - 7],
					    &rx->shadow[idx - 7]);
		}
	}
}
static inline void
myri10ge_unmap_rx_page(struct pci_dev *pdev,
		       struct myri10ge_rx_buffer_state *info, int bytes)
{
	/* unmap the recvd page if we're the only or last user of it */
	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
		pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
	}
}
#define MYRI10GE_HLEN 64	/* The number of bytes to copy from a
				 * page into an skb */

static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, struct myri10ge_rx_buf *rx,
		 int bytes, int len, __wsum csum)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
	int i, idx, hlen, remainder;
	struct pci_dev *pdev = mgp->pdev;
	struct net_device *dev = mgp->dev;
	u8 *va;

	len += MXGEFW_PAD;
	idx = rx->cnt & rx->mask;
	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
	prefetch(va);
	/* Fill skb_frag_struct(s) with data from our receive */
	for (i = 0, remainder = len; remainder > 0; i++) {
		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
		rx_frags[i].page = rx->info[idx].page;
		rx_frags[i].page_offset = rx->info[idx].page_offset;
		if (remainder < MYRI10GE_ALLOC_SIZE)
			rx_frags[i].size = remainder;
		else
			rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
		rx->cnt++;
		idx = rx->cnt & rx->mask;
		remainder -= MYRI10GE_ALLOC_SIZE;
	}

	if (dev->features & NETIF_F_LRO) {
		rx_frags[0].page_offset += MXGEFW_PAD;
		rx_frags[0].size -= MXGEFW_PAD;
		len -= MXGEFW_PAD;
		lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags,
				  /* opaque, will come back in get_frag_header */
				  len, len,
				  (void *)(__force unsigned long)csum, csum);

		return 1;
	}

	hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;

	/* allocate an skb to attach the page(s) to. This is done
	 * after trying LRO, so as to avoid skb allocation overheads */

	skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
	if (unlikely(skb == NULL)) {
		ss->stats.rx_dropped++;
		do {
			i--;
			put_page(rx_frags[i].page);
		} while (i != 0);
		return 0;
	}

	/* Attach the pages to the skb, and trim off any padding */
	myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
	if (skb_shinfo(skb)->frags[0].size <= 0) {
		put_page(skb_shinfo(skb)->frags[0].page);
		skb_shinfo(skb)->nr_frags = 0;
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_record_rx_queue(skb, ss - &mgp->ss[0]);

	if (mgp->csum_flag) {
		if ((skb->protocol == htons(ETH_P_IP)) ||
		    (skb->protocol == htons(ETH_P_IPV6))) {
			skb->csum = csum;
			skb->ip_summed = CHECKSUM_COMPLETE;
		} else
			myri10ge_vlan_ip_csum(skb, csum);
	}
	netif_receive_skb(skb);
	return 1;
}
static inline void
myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
{
	struct pci_dev *pdev = ss->mgp->pdev;
	struct myri10ge_tx_buf *tx = &ss->tx;
	struct netdev_queue *dev_queue;
	struct sk_buff *skb;
	int idx, len;

	while (tx->pkt_done != mcp_index) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		if (tx->info[idx].last) {
			tx->pkt_done++;
			tx->info[idx].last = 0;
		}
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_bytes += skb->len;
			ss->stats.tx_packets++;
			dev_kfree_skb_irq(skb);
			if (len)
				pci_unmap_single(pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}

	dev_queue = netdev_get_tx_queue(ss->dev, ss - ss->mgp->ss);
	/*
	 * Make a minimal effort to prevent the NIC from polling an
	 * idle tx queue.  If we can't get the lock we leave the queue
	 * active. In this case, either a thread was about to start
	 * using the queue anyway, or we lost a race and the NIC will
	 * waste some of its resources polling an inactive queue for a
	 * while.
	 */

	if ((ss->mgp->dev->real_num_tx_queues > 1) &&
	    __netif_tx_trylock(dev_queue)) {
		if (tx->req == tx->done) {
			tx->queue_active = 0;
			put_be32(htonl(1), tx->send_stop);
			mb();
		}
		__netif_tx_unlock(dev_queue);
	}

	/* start the queue if we've stopped it */
	if (netif_tx_queue_stopped(dev_queue) &&
	    tx->req - tx->done < (tx->mask >> 1)) {
		tx->wake_queue++;
		netif_tx_wake_queue(dev_queue);
	}
}
static inline int
myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
{
	struct myri10ge_rx_done *rx_done = &ss->rx_done;
	struct myri10ge_priv *mgp = ss->mgp;
	struct net_device *netdev = mgp->dev;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long rx_ok;

	int idx = rx_done->idx;
	int cnt = rx_done->cnt;
	int work_done = 0;
	u16 length;
	__wsum checksum;

	while (rx_done->entry[idx].length != 0 && work_done < budget) {
		length = ntohs(rx_done->entry[idx].length);
		rx_done->entry[idx].length = 0;
		checksum = csum_unfold(rx_done->entry[idx].checksum);
		if (length <= mgp->small_bytes)
			rx_ok = myri10ge_rx_done(ss, &ss->rx_small,
						 mgp->small_bytes,
						 length, checksum);
		else
			rx_ok = myri10ge_rx_done(ss, &ss->rx_big,
						 mgp->big_bytes,
						 length, checksum);
		rx_packets += rx_ok;
		rx_bytes += rx_ok * (unsigned long)length;
		cnt++;
		idx = cnt & (mgp->max_intr_slots - 1);
		work_done++;
	}
	rx_done->idx = idx;
	rx_done->cnt = cnt;
	ss->stats.rx_packets += rx_packets;
	ss->stats.rx_bytes += rx_bytes;

	if (netdev->features & NETIF_F_LRO)
		lro_flush_all(&rx_done->lro_mgr);

	/* restock receive rings if needed */
	if (ss->rx_small.fill_cnt - ss->rx_small.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
					mgp->small_bytes + MXGEFW_PAD, 0);
	if (ss->rx_big.fill_cnt - ss->rx_big.cnt < myri10ge_fill_thresh)
		myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);

	return work_done;
}
static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
{
	struct mcp_irq_data *stats = mgp->ss[0].fw_stats;

	if (unlikely(stats->stats_updated)) {
		unsigned link_up = ntohl(stats->link_up);
		if (mgp->link_state != link_up) {
			mgp->link_state = link_up;

			if (mgp->link_state == MXGEFW_LINK_UP) {
				if (netif_msg_link(mgp))
					netdev_info(mgp->dev, "link up\n");
				netif_carrier_on(mgp->dev);
				mgp->link_changes++;
			} else {
				if (netif_msg_link(mgp))
					netdev_info(mgp->dev, "link %s\n",
						    link_up == MXGEFW_LINK_MYRINET ?
						    "mismatch (Myrinet detected)" :
						    "down");
				netif_carrier_off(mgp->dev);
				mgp->link_changes++;
			}
		}
		if (mgp->rdma_tags_available !=
		    ntohl(stats->rdma_tags_available)) {
			mgp->rdma_tags_available =
			    ntohl(stats->rdma_tags_available);
			netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
				    mgp->rdma_tags_available);
		}
		mgp->down_cnt += stats->link_down;
		if (stats->link_down)
			wake_up(&mgp->down_wq);
	}
}
static int myri10ge_poll(struct napi_struct *napi, int budget)
{
	struct myri10ge_slice_state *ss =
	    container_of(napi, struct myri10ge_slice_state, napi);
	int work_done;

#ifdef CONFIG_MYRI10GE_DCA
	if (ss->mgp->dca_enabled)
		myri10ge_update_dca(ss);
#endif

	/* process as many rx events as NAPI will allow */
	work_done = myri10ge_clean_rx_done(ss, budget);

	if (work_done < budget) {
		napi_complete(napi);
		put_be32(htonl(3), ss->irq_claim);
	}
	return work_done;
}
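/*
 * Each slice owns a pair of 32-bit irq-claim words in SRAM: the NAPI
 * poller above writes the first (ss->irq_claim) when a polling pass
 * completes, and the hard interrupt handler below writes the second
 * (ss->irq_claim + 1) on its way out, once stats->valid has dropped
 * back to zero, to re-arm the interrupt.
 */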
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
	struct myri10ge_slice_state *ss = arg;
	struct myri10ge_priv *mgp = ss->mgp;
	struct mcp_irq_data *stats = ss->fw_stats;
	struct myri10ge_tx_buf *tx = &ss->tx;
	u32 send_done_count;
	int i;

	/* an interrupt on a non-zero receive-only slice is implicitly
	 * valid since MSI-X irqs are not shared */
	if ((mgp->dev->real_num_tx_queues == 1) && (ss != mgp->ss)) {
		napi_schedule(&ss->napi);
		return IRQ_HANDLED;
	}

	/* make sure it is our IRQ, and that the DMA has finished */
	if (unlikely(!stats->valid))
		return IRQ_NONE;

	/* low bit indicates receives are present, so schedule
	 * napi poll handler */
	if (stats->valid & 1)
		napi_schedule(&ss->napi);

	if (!mgp->msi_enabled && !mgp->msix_enabled) {
		put_be32(0, mgp->irq_deassert);
		if (!myri10ge_deassert_wait)
			stats->valid = 0;
		mb();
	} else
		stats->valid = 0;

	/* Wait for IRQ line to go low, if using INTx */
	i = 0;
	while (1) {
		i++;
		/* check for transmit completes and receives */
		send_done_count = ntohl(stats->send_done_count);
		if (send_done_count != tx->pkt_done)
			myri10ge_tx_done(ss, (int)send_done_count);
		if (unlikely(i > myri10ge_max_irq_loops)) {
			netdev_err(mgp->dev, "irq stuck?\n");
			stats->valid = 0;
			schedule_work(&mgp->watchdog_work);
		}
		if (likely(stats->valid == 0))
			break;
		cpu_relax();
		barrier();
	}

	/* Only slice 0 updates stats */
	if (ss == mgp->ss)
		myri10ge_check_statblock(mgp);

	put_be32(htonl(3), ss->irq_claim + 1);
	return IRQ_HANDLED;
}
static int
myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	char *ptr;
	int i;

	cmd->autoneg = AUTONEG_DISABLE;
	cmd->speed = SPEED_10000;
	cmd->duplex = DUPLEX_FULL;

	/*
	 * parse the product code to determine the interface type
	 * (CX4, XFP, Quad Ribbon Fiber) by looking at the character
	 * after the 3rd dash in the driver's cached copy of the
	 * EEPROM's product code string.
	 */
	ptr = mgp->product_code_string;
	if (ptr == NULL) {
		netdev_err(netdev, "Missing product code\n");
		return 0;
	}
	for (i = 0; i < 3; i++, ptr++) {
		ptr = strchr(ptr, '-');
		if (ptr == NULL) {
			netdev_err(netdev, "Invalid product code %s\n",
				   mgp->product_code_string);
			return 0;
		}
	}
	if (*ptr == 'R' || *ptr == 'Q' || *ptr == 'S') {
		/* We've found either an XFP, quad ribbon fiber, or SFP+ */
		cmd->port = PORT_FIBRE;
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->advertising |= ADVERTISED_FIBRE;
	} else {
		cmd->port = PORT_OTHER;
	}
	if (*ptr == 'R' || *ptr == 'S')
		cmd->transceiver = XCVR_EXTERNAL;
	else
		cmd->transceiver = XCVR_INTERNAL;

	return 0;
}
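/*
 * Example (hypothetical product code): for "10G-PCIE-8B-S" the
 * character after the third dash is 'S', so the parser above reports
 * PORT_FIBRE with an external transceiver (SFP+); a character outside
 * 'R'/'Q'/'S' falls through to PORT_OTHER with XCVR_INTERNAL.
 */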
static void
myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	strlcpy(info->driver, "myri10ge", sizeof(info->driver));
	strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
	strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
}
static int
myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	coal->rx_coalesce_usecs = mgp->intr_coal_delay;
	return 0;
}

static int
myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	mgp->intr_coal_delay = coal->rx_coalesce_usecs;
	put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
	return 0;
}
static void
myri10ge_get_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	pause->autoneg = 0;
	pause->rx_pause = mgp->pause;
	pause->tx_pause = mgp->pause;
}

static int
myri10ge_set_pauseparam(struct net_device *netdev,
			struct ethtool_pauseparam *pause)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	if (pause->tx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->tx_pause);
	if (pause->rx_pause != mgp->pause)
		return myri10ge_change_pause(mgp, pause->rx_pause);
	if (pause->autoneg != 0)
		return -EINVAL;
	return 0;
}
static void
myri10ge_get_ringparam(struct net_device *netdev,
		       struct ethtool_ringparam *ring)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	ring->rx_mini_max_pending = mgp->ss[0].rx_small.mask + 1;
	ring->rx_max_pending = mgp->ss[0].rx_big.mask + 1;
	ring->rx_jumbo_max_pending = 0;
	ring->tx_max_pending = mgp->ss[0].tx.mask + 1;
	ring->rx_mini_pending = ring->rx_mini_max_pending;
	ring->rx_pending = ring->rx_max_pending;
	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
	ring->tx_pending = ring->tx_max_pending;
}
static u32 myri10ge_get_rx_csum(struct net_device *netdev)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	if (mgp->csum_flag)
		return 1;
	else
		return 0;
}

static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);

	if (csum_enabled)
		mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
	else {
		netdev->features &= ~NETIF_F_LRO;
		mgp->csum_flag = 0;
	}
	return 0;
}

static int myri10ge_set_tso(struct net_device *netdev, u32 tso_enabled)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	unsigned long flags = mgp->features & (NETIF_F_TSO6 | NETIF_F_TSO);

	if (tso_enabled)
		netdev->features |= flags;
	else
		netdev->features &= ~flags;
	return 0;
}
static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
	"tx_heartbeat_errors", "tx_window_errors",
	/* device-specific stats */
	"tx_boundary", "WC", "irq", "MSI", "MSIX",
	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
	"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
	"dca_capable_firmware", "dca_device_present",
#endif
	"link_changes", "link_up", "dropped_link_overflow",
	"dropped_link_error_or_filtered",
	"dropped_pause", "dropped_bad_phy", "dropped_bad_crc32",
	"dropped_unicast_filtered", "dropped_multicast_filtered",
	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
	"dropped_no_big_buffer"
};

static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
	"----------- slice ---------",
	"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
	"rx_small_cnt", "rx_big_cnt",
	"wake_queue", "stop_queue", "tx_linearized", "LRO aggregated",
	"LRO flushed",
	"LRO avg aggr", "LRO no_desc"
};

#define MYRI10GE_NET_STATS_LEN      21
#define MYRI10GE_MAIN_STATS_LEN  ARRAY_SIZE(myri10ge_gstrings_main_stats)
#define MYRI10GE_SLICE_STATS_LEN  ARRAY_SIZE(myri10ge_gstrings_slice_stats)
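/*
 * Invariant: MYRI10GE_NET_STATS_LEN (21) must match the number of
 * generic net_device_stats counters named at the top of
 * myri10ge_gstrings_main_stats, because myri10ge_get_ethtool_stats()
 * copies exactly that many unsigned longs straight out of
 * &netdev->stats before appending the device-specific values.
 */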
static void
myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, *myri10ge_gstrings_main_stats,
		       sizeof(myri10ge_gstrings_main_stats));
		data += sizeof(myri10ge_gstrings_main_stats);
		for (i = 0; i < mgp->num_slices; i++) {
			memcpy(data, *myri10ge_gstrings_slice_stats,
			       sizeof(myri10ge_gstrings_slice_stats));
			data += sizeof(myri10ge_gstrings_slice_stats);
		}
		break;
	}
}
*netdev
, int sset
)
1845 struct myri10ge_priv
*mgp
= netdev_priv(netdev
);
1849 return MYRI10GE_MAIN_STATS_LEN
+
1850 mgp
->num_slices
* MYRI10GE_SLICE_STATS_LEN
;
static void
myri10ge_get_ethtool_stats(struct net_device *netdev,
			   struct ethtool_stats *stats, u64 * data)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	struct myri10ge_slice_state *ss;
	int slice;
	int i;

	/* force stats update */
	(void)myri10ge_get_stats(netdev);
	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
		data[i] = ((unsigned long *)&netdev->stats)[i];

	data[i++] = (unsigned int)mgp->tx_boundary;
	data[i++] = (unsigned int)mgp->wc_enabled;
	data[i++] = (unsigned int)mgp->pdev->irq;
	data[i++] = (unsigned int)mgp->msi_enabled;
	data[i++] = (unsigned int)mgp->msix_enabled;
	data[i++] = (unsigned int)mgp->read_dma;
	data[i++] = (unsigned int)mgp->write_dma;
	data[i++] = (unsigned int)mgp->read_write_dma;
	data[i++] = (unsigned int)mgp->serial_number;
	data[i++] = (unsigned int)mgp->watchdog_resets;
#ifdef CONFIG_MYRI10GE_DCA
	data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL);
	data[i++] = (unsigned int)(mgp->dca_enabled);
#endif
	data[i++] = (unsigned int)mgp->link_changes;

	/* firmware stats are useful only in the first slice */
	ss = &mgp->ss[0];
	data[i++] = (unsigned int)ntohl(ss->fw_stats->link_up);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_link_overflow);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_link_error_or_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_pause);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_phy);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_bad_crc32);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_unicast_filtered);
	data[i++] =
	    (unsigned int)ntohl(ss->fw_stats->dropped_multicast_filtered);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_runt);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_overrun);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_small_buffer);
	data[i++] = (unsigned int)ntohl(ss->fw_stats->dropped_no_big_buffer);

	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];
		data[i++] = slice;
		data[i++] = (unsigned int)ss->tx.pkt_start;
		data[i++] = (unsigned int)ss->tx.pkt_done;
		data[i++] = (unsigned int)ss->tx.req;
		data[i++] = (unsigned int)ss->tx.done;
		data[i++] = (unsigned int)ss->rx_small.cnt;
		data[i++] = (unsigned int)ss->rx_big.cnt;
		data[i++] = (unsigned int)ss->tx.wake_queue;
		data[i++] = (unsigned int)ss->tx.stop_queue;
		data[i++] = (unsigned int)ss->tx.linearized;
		data[i++] = ss->rx_done.lro_mgr.stats.aggregated;
		data[i++] = ss->rx_done.lro_mgr.stats.flushed;
		if (ss->rx_done.lro_mgr.stats.flushed)
			data[i++] = ss->rx_done.lro_mgr.stats.aggregated /
			    ss->rx_done.lro_mgr.stats.flushed;
		else
			data[i++] = 0;
		data[i++] = ss->rx_done.lro_mgr.stats.no_desc;
	}
}
static void myri10ge_set_msglevel(struct net_device *netdev, u32 value)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	mgp->msg_enable = value;
}

static u32 myri10ge_get_msglevel(struct net_device *netdev)
{
	struct myri10ge_priv *mgp = netdev_priv(netdev);
	return mgp->msg_enable;
}

static int myri10ge_set_flags(struct net_device *netdev, u32 value)
{
	return ethtool_op_set_flags(netdev, value, ETH_FLAG_LRO);
}
static const struct ethtool_ops myri10ge_ethtool_ops = {
	.get_settings = myri10ge_get_settings,
	.get_drvinfo = myri10ge_get_drvinfo,
	.get_coalesce = myri10ge_get_coalesce,
	.set_coalesce = myri10ge_set_coalesce,
	.get_pauseparam = myri10ge_get_pauseparam,
	.set_pauseparam = myri10ge_set_pauseparam,
	.get_ringparam = myri10ge_get_ringparam,
	.get_rx_csum = myri10ge_get_rx_csum,
	.set_rx_csum = myri10ge_set_rx_csum,
	.set_tx_csum = ethtool_op_set_tx_hw_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = myri10ge_set_tso,
	.get_link = ethtool_op_get_link,
	.get_strings = myri10ge_get_strings,
	.get_sset_count = myri10ge_get_sset_count,
	.get_ethtool_stats = myri10ge_get_ethtool_stats,
	.set_msglevel = myri10ge_set_msglevel,
	.get_msglevel = myri10ge_get_msglevel,
	.get_flags = ethtool_op_get_flags,
	.set_flags = myri10ge_set_flags
};
static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct myri10ge_cmd cmd;
	struct net_device *dev = mgp->dev;
	int tx_ring_size, rx_ring_size;
	int tx_ring_entries, rx_ring_entries;
	int i, slice, status;
	size_t bytes;

	/* get ring sizes */
	slice = ss - mgp->ss;

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
	tx_ring_size = cmd.data0;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
	if (status != 0)
		return status;
	rx_ring_size = cmd.data0;

	tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
	rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
	ss->tx.mask = tx_ring_entries - 1;
	ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1;

	status = -ENOMEM;

	/* allocate the host shadow rings */

	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
	    * sizeof(*ss->tx.req_list);
	ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.req_bytes == NULL)
		goto abort_with_nothing;

	/* ensure req_list entries are aligned to 8 bytes */
	ss->tx.req_list = (struct mcp_kreq_ether_send *)
	    ALIGN((unsigned long)ss->tx.req_bytes, 8);
	ss->tx.queue_active = 0;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.shadow);
	ss->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.shadow == NULL)
		goto abort_with_tx_req_bytes;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.shadow);
	ss->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.shadow == NULL)
		goto abort_with_rx_small_shadow;

	/* allocate the host info rings */

	bytes = tx_ring_entries * sizeof(*ss->tx.info);
	ss->tx.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->tx.info == NULL)
		goto abort_with_rx_big_shadow;

	bytes = rx_ring_entries * sizeof(*ss->rx_small.info);
	ss->rx_small.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_small.info == NULL)
		goto abort_with_tx_info;

	bytes = rx_ring_entries * sizeof(*ss->rx_big.info);
	ss->rx_big.info = kzalloc(bytes, GFP_KERNEL);
	if (ss->rx_big.info == NULL)
		goto abort_with_rx_small_info;

	/* Fill the receive rings */
	ss->rx_big.cnt = 0;
	ss->rx_small.cnt = 0;
	ss->rx_big.fill_cnt = 0;
	ss->rx_small.fill_cnt = 0;
	ss->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
	ss->rx_small.watchdog_needed = 0;
	ss->rx_big.watchdog_needed = 0;
	myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
				mgp->small_bytes + MXGEFW_PAD, 0);

	if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
			   slice, ss->rx_small.fill_cnt);
		goto abort_with_rx_small_ring;
	}

	myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
	if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
		netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
			   slice, ss->rx_big.fill_cnt);
		goto abort_with_rx_big_ring;
	}

	return 0;

abort_with_rx_big_ring:
	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		int idx = i & ss->rx_big.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

abort_with_rx_small_ring:
	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		int idx = i & ss->rx_small.mask;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}

	kfree(ss->rx_big.info);

abort_with_rx_small_info:
	kfree(ss->rx_small.info);

abort_with_tx_info:
	kfree(ss->tx.info);

abort_with_rx_big_shadow:
	kfree(ss->rx_big.shadow);

abort_with_rx_small_shadow:
	kfree(ss->rx_small.shadow);

abort_with_tx_req_bytes:
	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;

abort_with_nothing:
	return status;
}
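/*
 * Teardown note: rx entries between "cnt" (next to be consumed) and
 * "fill_cnt" (next to be posted) hold pages that were handed to the
 * NIC but never passed up the stack; they must be DMA-unmapped and
 * their page references dropped.  Any skb still between tx "done"
 * and "req" is an in-flight transmit that is simply dropped.
 */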
static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
{
	struct myri10ge_priv *mgp = ss->mgp;
	struct sk_buff *skb;
	struct myri10ge_tx_buf *tx;
	int i, len, idx;

	/* If not allocated, skip it */
	if (ss->tx.req_list == NULL)
		return;

	for (i = ss->rx_big.cnt; i < ss->rx_big.fill_cnt; i++) {
		idx = i & ss->rx_big.mask;
		if (i == ss->rx_big.fill_cnt - 1)
			ss->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_big.info[idx],
				       mgp->big_bytes);
		put_page(ss->rx_big.info[idx].page);
	}

	for (i = ss->rx_small.cnt; i < ss->rx_small.fill_cnt; i++) {
		idx = i & ss->rx_small.mask;
		if (i == ss->rx_small.fill_cnt - 1)
			ss->rx_small.info[idx].page_offset =
			    MYRI10GE_ALLOC_SIZE;
		myri10ge_unmap_rx_page(mgp->pdev, &ss->rx_small.info[idx],
				       mgp->small_bytes + MXGEFW_PAD);
		put_page(ss->rx_small.info[idx].page);
	}
	tx = &ss->tx;
	while (tx->done != tx->req) {
		idx = tx->done & tx->mask;
		skb = tx->info[idx].skb;

		/* Mark as free */
		tx->info[idx].skb = NULL;
		tx->done++;
		len = dma_unmap_len(&tx->info[idx], len);
		dma_unmap_len_set(&tx->info[idx], len, 0);
		if (skb) {
			ss->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			if (len)
				pci_unmap_single(mgp->pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
		} else {
			if (len)
				pci_unmap_page(mgp->pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
		}
	}
	kfree(ss->rx_big.info);

	kfree(ss->rx_small.info);

	kfree(ss->tx.info);

	kfree(ss->rx_big.shadow);

	kfree(ss->rx_small.shadow);

	kfree(ss->tx.req_bytes);
	ss->tx.req_bytes = NULL;
	ss->tx.req_list = NULL;
}
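/*
 * Interrupt setup strategy, in order of preference: one MSI-X vector
 * per slice (each named "<ifname>:slice-N" so the vectors are easy to
 * tell apart in /proc/interrupts), then plain MSI, then a legacy
 * shared ("xPIC") interrupt routed to slice 0 only.
 */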
static int myri10ge_request_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	struct myri10ge_slice_state *ss;
	struct net_device *netdev = mgp->dev;
	int i;
	int status;

	mgp->msi_enabled = 0;
	mgp->msix_enabled = 0;
	status = 0;
	if (myri10ge_msi) {
		if (mgp->num_slices > 1) {
			status =
			    pci_enable_msix(pdev, mgp->msix_vectors,
					    mgp->num_slices);
			if (status == 0) {
				mgp->msix_enabled = 1;
			} else {
				dev_err(&pdev->dev,
					"Error %d setting up MSI-X\n", status);
				return status;
			}
		}
		if (mgp->msix_enabled == 0) {
			status = pci_enable_msi(pdev);
			if (status != 0) {
				dev_err(&pdev->dev,
					"Error %d setting up MSI; falling back to xPIC\n",
					status);
			} else {
				mgp->msi_enabled = 1;
			}
		}
	}
	if (mgp->msix_enabled) {
		for (i = 0; i < mgp->num_slices; i++) {
			ss = &mgp->ss[i];
			snprintf(ss->irq_desc, sizeof(ss->irq_desc),
				 "%s:slice-%d", netdev->name, i);
			status = request_irq(mgp->msix_vectors[i].vector,
					     myri10ge_intr, 0, ss->irq_desc,
					     ss);
			if (status != 0) {
				dev_err(&pdev->dev,
					"slice %d failed to allocate IRQ\n", i);
				i--;
				while (i >= 0) {
					free_irq(mgp->msix_vectors[i].vector,
						 &mgp->ss[i]);
					i--;
				}
				pci_disable_msix(pdev);
				return status;
			}
		}
	} else {
		status = request_irq(pdev->irq, myri10ge_intr, IRQF_SHARED,
				     mgp->dev->name, &mgp->ss[0]);
		if (status != 0) {
			dev_err(&pdev->dev, "failed to allocate IRQ\n");
			if (mgp->msi_enabled)
				pci_disable_msi(pdev);
		}
	}
	return status;
}
static void myri10ge_free_irq(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int i;

	if (mgp->msix_enabled) {
		for (i = 0; i < mgp->num_slices; i++)
			free_irq(mgp->msix_vectors[i].vector, &mgp->ss[i]);
	} else {
		free_irq(pdev->irq, &mgp->ss[0]);
	}
	if (mgp->msi_enabled)
		pci_disable_msi(pdev);
	if (mgp->msix_enabled)
		pci_disable_msix(pdev);
}
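/*
 * LRO header parsing note: the NIC's CHECKSUM_COMPLETE value arrives
 * through the opaque "priv" pointer.  Only non-fragmented IPv4 TCP
 * frames may be aggregated; for VLAN frames, the VLAN header's
 * contribution is first subtracted from the hardware checksum, since
 * the hardware sums everything starting ETH_HLEN bytes into the frame.
 */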
static int
myri10ge_get_frag_header(struct skb_frag_struct *frag, void **mac_hdr,
			 void **ip_hdr, void **tcpudp_hdr,
			 u64 * hdr_flags, void *priv)
{
	struct ethhdr *eh;
	struct vlan_ethhdr *veh;
	struct iphdr *iph;
	u8 *va = page_address(frag->page) + frag->page_offset;
	unsigned long ll_hlen;
	/* passed opaque through lro_receive_frags() */
	__wsum csum = (__force __wsum) (unsigned long)priv;

	/* find the mac header, aborting if not IPv4 */

	eh = (struct ethhdr *)va;
	*mac_hdr = eh;
	ll_hlen = ETH_HLEN;
	if (eh->h_proto != htons(ETH_P_IP)) {
		if (eh->h_proto == htons(ETH_P_8021Q)) {
			veh = (struct vlan_ethhdr *)va;
			if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
				return -1;

			ll_hlen += VLAN_HLEN;

			/*
			 * HW checksum starts ETH_HLEN bytes into
			 * frame, so we must subtract off the VLAN
			 * header's checksum before csum can be used
			 */
			csum = csum_sub(csum, csum_partial(va + ETH_HLEN,
							   VLAN_HLEN, 0));
		} else {
			return -1;
		}
	}
	*hdr_flags = LRO_IPV4;

	iph = (struct iphdr *)(va + ll_hlen);
	*ip_hdr = iph;
	if (iph->protocol != IPPROTO_TCP)
		return -1;
	if (iph->frag_off & htons(IP_MF | IP_OFFSET))
		return -1;
	*hdr_flags |= LRO_TCP;
	*tcpudp_hdr = (u8 *) (*ip_hdr) + (iph->ihl << 2);

	/* verify the IP checksum */
	if (unlikely(ip_fast_csum((u8 *) iph, iph->ihl)))
		return -1;

	/* verify the checksum */
	if (unlikely(csum_tcpudp_magic(iph->saddr, iph->daddr,
				       ntohs(iph->tot_len) - (iph->ihl << 2),
				       IPPROTO_TCP, csum)))
		return -1;

	return 0;
}
static int myri10ge_get_txrx(struct myri10ge_priv *mgp, int slice)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int status;

	ss = &mgp->ss[slice];
	status = 0;
	if (slice == 0 || (mgp->dev->real_num_tx_queues > 1)) {
		cmd.data0 = slice;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET,
					   &cmd, 0);
		ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *)
		    (mgp->sram + cmd.data0);
	}
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET,
				    &cmd, 0);
	ss->rx_small.lanai = (struct mcp_kreq_ether_recv __iomem *)
	    (mgp->sram + cmd.data0);
	cmd.data0 = slice;
	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
	ss->rx_big.lanai = (struct mcp_kreq_ether_recv __iomem *)
	    (mgp->sram + cmd.data0);

	ss->tx.send_go = (__iomem __be32 *)
	    (mgp->sram + MXGEFW_ETH_SEND_GO + 64 * slice);
	ss->tx.send_stop = (__iomem __be32 *)
	    (mgp->sram + MXGEFW_ETH_SEND_STOP + 64 * slice);
	return status;
}
static int myri10ge_set_stats(struct myri10ge_priv *mgp, int slice)
{
	struct myri10ge_cmd cmd;
	struct myri10ge_slice_state *ss;
	int status;

	ss = &mgp->ss[slice];
	cmd.data0 = MYRI10GE_LOWPART_TO_U32(ss->fw_stats_bus);
	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(ss->fw_stats_bus);
	cmd.data2 = sizeof(struct mcp_irq_data) | (slice << 16);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd, 0);
	if (status == -ENOSYS) {
		dma_addr_t bus = ss->fw_stats_bus;
		if (slice != 0)
			return -EINVAL;
		bus += offsetof(struct mcp_irq_data, send_done_count);
		cmd.data0 = MYRI10GE_LOWPART_TO_U32(bus);
		cmd.data1 = MYRI10GE_HIGHPART_TO_U32(bus);
		status = myri10ge_send_cmd(mgp,
					   MXGEFW_CMD_SET_STATS_DMA_OBSOLETE,
					   &cmd, 0);
		/* Firmware cannot support multicast without STATS_DMA_V2 */
		mgp->fw_multicast_support = 0;
	} else {
		mgp->fw_multicast_support = 1;
	}
	return 0;
}
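/*
 * Bring-up order in myri10ge_open() below: reset the NIC, program the
 * RSS indirection table (an identity map) when multiple slices are in
 * use, grab interrupts, size the small/big receive buffers, then set
 * up and fill each slice's rings before finally telling the firmware
 * the MTU and buffer sizes and raising the link.
 */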
static int myri10ge_open(struct net_device *dev)
{
	struct myri10ge_slice_state *ss;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	int i, status, big_pow2, slice;
	u8 __iomem *itable;
	struct net_lro_mgr *lro_mgr;

	if (mgp->running != MYRI10GE_ETH_STOPPED)
		return -EBUSY;

	mgp->running = MYRI10GE_ETH_STARTING;
	status = myri10ge_reset(mgp);
	if (status != 0) {
		netdev_err(dev, "failed reset\n");
		goto abort_with_nothing;
	}

	if (mgp->num_slices > 1) {
		cmd.data0 = mgp->num_slices;
		cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE;
		if (mgp->dev->real_num_tx_queues > 1)
			cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
					   &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to set number of slices\n");
			goto abort_with_nothing;
		}
		/* setup the indirection table */
		cmd.data0 = mgp->num_slices;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_TABLE_SIZE,
					   &cmd, 0);

		status |= myri10ge_send_cmd(mgp,
					    MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
					    &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to setup rss tables\n");
			goto abort_with_nothing;
		}

		/* just enable an identity mapping */
		itable = mgp->sram + cmd.data0;
		for (i = 0; i < mgp->num_slices; i++)
			__raw_writeb(i, &itable[i]);

		cmd.data0 = 1;
		cmd.data1 = myri10ge_rss_hash;
		status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
					   &cmd, 0);
		if (status != 0) {
			netdev_err(dev, "failed to enable slices\n");
			goto abort_with_nothing;
		}
	}

	status = myri10ge_request_irq(mgp);
	if (status != 0)
		goto abort_with_nothing;

	/* decide what small buffer size to use.  For good TCP rx
	 * performance, it is important to not receive 1514 byte
	 * frames into jumbo buffers, as it confuses the socket buffer
	 * accounting code, leading to drops and erratic performance.
	 */

	if (dev->mtu <= ETH_DATA_LEN)
		/* enough for a TCP header */
		mgp->small_bytes = (128 > SMP_CACHE_BYTES)
		    ? (128 - MXGEFW_PAD)
		    : (SMP_CACHE_BYTES - MXGEFW_PAD);
	else
		/* enough for a vlan encapsulated ETH_DATA_LEN frame */
		mgp->small_bytes = VLAN_ETH_FRAME_LEN;

	/* Override the small buffer size? */
	if (myri10ge_small_bytes > 0)
		mgp->small_bytes = myri10ge_small_bytes;

	/* Firmware needs the big buff size as a power of 2.  Lie and
	 * tell it the buffer is larger, because we only use 1
	 * buffer/pkt, and the mtu will prevent overruns.
	 */
	big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
	if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
		while (!is_power_of_2(big_pow2))
			big_pow2++;
		mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
	} else {
		big_pow2 = MYRI10GE_ALLOC_SIZE;
		mgp->big_bytes = big_pow2;
	}

	/* setup the per-slice data structures */
	for (slice = 0; slice < mgp->num_slices; slice++) {
		ss = &mgp->ss[slice];

		status = myri10ge_get_txrx(mgp, slice);
		if (status != 0) {
			netdev_err(dev, "failed to get ring sizes or locations\n");
			goto abort_with_rings;
		}
		status = myri10ge_allocate_rings(ss);
		if (status != 0)
			goto abort_with_rings;

		/* only firmware which supports multiple TX queues
		 * supports setting up the tx stats on non-zero
		 * slices */
		if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
			status = myri10ge_set_stats(mgp, slice);
		if (status) {
			netdev_err(dev, "Couldn't set stats DMA\n");
			goto abort_with_rings;
		}

		lro_mgr = &ss->rx_done.lro_mgr;
		lro_mgr->dev = dev;
		lro_mgr->features = LRO_F_NAPI;
		lro_mgr->ip_summed = CHECKSUM_COMPLETE;
		lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
		lro_mgr->max_desc = MYRI10GE_MAX_LRO_DESCRIPTORS;
		lro_mgr->lro_arr = ss->rx_done.lro_desc;
		lro_mgr->get_frag_header = myri10ge_get_frag_header;
		lro_mgr->max_aggr = myri10ge_lro_max_pkts;
		lro_mgr->frag_align_pad = 2;
		if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
			lro_mgr->max_aggr = MAX_SKB_FRAGS;

		/* must happen prior to any irq */
		napi_enable(&(ss)->napi);
	}

	/* now give firmware buffers sizes, and MTU */
	cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
	cmd.data0 = mgp->small_bytes;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
	cmd.data0 = big_pow2;
	status |=
	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
	if (status) {
		netdev_err(dev, "Couldn't set buffer sizes\n");
		goto abort_with_rings;
	}

	/*
	 * Set Linux style TSO mode; this is needed only on newer
	 * firmware versions.  Older versions default to Linux
	 * style TSO
	 */
	cmd.data0 = 0;
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
	if (status && status != -ENOSYS) {
		netdev_err(dev, "Couldn't set TSO mode\n");
		goto abort_with_rings;
	}

	mgp->link_state = ~0U;
	mgp->rdma_tags_available = 15;

	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
	if (status) {
		netdev_err(dev, "Couldn't bring up link\n");
		goto abort_with_rings;
	}

	mgp->running = MYRI10GE_ETH_RUNNING;
	mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
	add_timer(&mgp->watchdog_timer);
	netif_tx_wake_all_queues(dev);

	return 0;

abort_with_rings:
	while (slice) {
		slice--;
		napi_disable(&mgp->ss[slice].napi);
	}
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_free_rings(&mgp->ss[i]);

	myri10ge_free_irq(mgp);

abort_with_nothing:
	mgp->running = MYRI10GE_ETH_STOPPED;
	return -ENOMEM;
}
static int myri10ge_close(struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	int status, old_down_cnt;
	int i;

	if (mgp->running != MYRI10GE_ETH_RUNNING)
		return 0;

	if (mgp->ss[0].tx.req_bytes == NULL)
		return 0;

	del_timer_sync(&mgp->watchdog_timer);
	mgp->running = MYRI10GE_ETH_STOPPING;
	for (i = 0; i < mgp->num_slices; i++) {
		napi_disable(&mgp->ss[i].napi);
	}
	netif_carrier_off(dev);

	netif_tx_stop_all_queues(dev);
	if (mgp->rebooted == 0) {
		old_down_cnt = mgp->down_cnt;
		mb();
		status =
		    myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
		if (status)
			netdev_err(dev, "Couldn't bring down link\n");

		wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
				   HZ);
		if (old_down_cnt == mgp->down_cnt)
			netdev_err(dev, "never got down irq\n");
	}
	netif_tx_disable(dev);
	myri10ge_free_irq(mgp);
	for (i = 0; i < mgp->num_slices; i++)
		myri10ge_free_rings(&mgp->ss[i]);

	mgp->running = MYRI10GE_ETH_STOPPED;
	return 0;
}
/* copy an array of struct mcp_kreq_ether_send's to the mcp.  Copy
 * backwards one at a time and handle ring wraps */

static inline void
myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
			      struct mcp_kreq_ether_send *src, int cnt)
{
	int idx, starting_slot;
	starting_slot = tx->req;
	while (cnt > 1) {
		cnt--;
		idx = (starting_slot + cnt) & tx->mask;
		myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
		mb();
	}
}
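/*
 * Ordering note: writing the wrapped tail of a chain first, and the
 * first descriptor last (with its valid flags restored only at the
 * very end in myri10ge_submit_req() below), ensures the NIC never
 * observes a valid-looking first request before the rest of the
 * chain is visible.
 */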
/*
 * copy an array of struct mcp_kreq_ether_send's to the mcp.  Copy
 * at most 32 bytes at a time, so as to avoid involving the software
 * pio handler in the nic.  We re-write the first segment's flags
 * to mark them valid only after writing the entire chain.
 */

static inline void
myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
		    int cnt)
{
	int idx, i;
	struct mcp_kreq_ether_send __iomem *dstp, *dst;
	struct mcp_kreq_ether_send *srcp;
	u8 last_flags;

	idx = tx->req & tx->mask;

	last_flags = src->flags;
	src->flags = 0;
	mb();
	dst = dstp = &tx->lanai[idx];
	srcp = src;

	if ((idx + cnt) < tx->mask) {
		for (i = 0; i < (cnt - 1); i += 2) {
			myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
			mb();	/* force write every 32 bytes */
			srcp += 2;
			dstp += 2;
		}
	} else {
		/* submit all but the first request, and ensure
		 * that it is submitted below */
		myri10ge_submit_req_backwards(tx, src, cnt);
		i = 0;
	}
	if (i < cnt) {
		/* submit the first request */
		myri10ge_pio_copy(dstp, srcp, sizeof(*src));
		mb();		/* barrier before setting valid flag */
	}

	/* re-write the last 32-bits with the valid flags */
	src->flags = last_flags;
	put_be32(*((__be32 *) src + 3), (__be32 __iomem *) dst + 3);
	tx->req += cnt;
	mb();
}
/*
 * Transmit a packet.  We need to split the packet so that a single
 * segment does not cross myri10ge->tx_boundary, so this makes segment
 * counting tricky.  So rather than try to count segments up front, we
 * just give up if there are too few segments to hold a reasonably
 * fragmented packet currently available.  If we run
 * out of segments while preparing a packet for DMA, we just linearize
 * it and try again.
 */

static netdev_tx_t myri10ge_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_state *ss;
	struct mcp_kreq_ether_send *req;
	struct myri10ge_tx_buf *tx;
	struct skb_frag_struct *frag;
	struct netdev_queue *netdev_queue;
	dma_addr_t bus;
	u32 low;
	__be32 high_swapped;
	unsigned int len;
	int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
	u16 pseudo_hdr_offset, cksum_offset, queue;
	int cum_len, seglen, boundary, rdma_count;
	u8 flags, odd_flag;

	queue = skb_get_queue_mapping(skb);
	ss = &mgp->ss[queue];
	netdev_queue = netdev_get_tx_queue(mgp->dev, queue);
	tx = &ss->tx;

again:
	req = tx->req_list;
	avail = tx->mask - 1 - (tx->req - tx->done);

	mss = 0;
	max_segments = MXGEFW_MAX_SEND_DESC;

	if (skb_is_gso(skb)) {
		mss = skb_shinfo(skb)->gso_size;
		max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
	}

	if ((unlikely(avail < max_segments))) {
		/* we are out of transmit resources */
		tx->stop_queue++;
		netif_tx_stop_queue(netdev_queue);
		return NETDEV_TX_BUSY;
	}

	/* Setup checksum offloading, if needed */
	cksum_offset = 0;
	pseudo_hdr_offset = 0;
	odd_flag = 0;
	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		cksum_offset = skb_checksum_start_offset(skb);
		pseudo_hdr_offset = cksum_offset + skb->csum_offset;
		/* If the headers are excessively large, then we must
		 * fall back to a software checksum */
		if (unlikely(!mss && (cksum_offset > 255 ||
				      pseudo_hdr_offset > 127))) {
			if (skb_checksum_help(skb))
				goto drop;
			cksum_offset = 0;
			pseudo_hdr_offset = 0;
		} else {
			odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
			flags |= MXGEFW_FLAGS_CKSUM;
		}
	}

	cum_len = 0;

	if (mss) {		/* TSO */
		/* this removes any CKSUM flag from before */
		flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);

		/* negative cum_len signifies to the
		 * send loop that we are still in the
		 * header portion of the TSO packet.
		 * TSO header can be at most 1KB long */
		cum_len = -(skb_transport_offset(skb) + tcp_hdrlen(skb));

		/* for IPv6 TSO, the checksum offset stores the
		 * TCP header length, to save the firmware from
		 * the need to parse the headers */
		if (skb_is_gso_v6(skb)) {
			cksum_offset = tcp_hdrlen(skb);
			/* Can only handle headers <= max_tso6 long */
			if (unlikely(-cum_len > mgp->max_tso6))
				return myri10ge_sw_tso(skb, dev);
		}
		/* for TSO, pseudo_hdr_offset holds mss.
		 * The firmware figures out where to put
		 * the checksum by parsing the header. */
		pseudo_hdr_offset = mss;
	} else
		/* Mark small packets, and pad out tiny packets */
	if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
		flags |= MXGEFW_FLAGS_SMALL;

		/* pad frames to at least ETH_ZLEN bytes */
		if (unlikely(skb->len < ETH_ZLEN)) {
			if (skb_padto(skb, ETH_ZLEN)) {
				/* The packet is gone, so we must
				 * return 0 */
				ss->stats.tx_dropped += 1;
				return NETDEV_TX_OK;
			}
			/* adjust the len to account for the zero pad
			 * so that the nic can know how long it is */
			skb->len = ETH_ZLEN;
		}
	}

	/* map the skb for DMA */
	len = skb_headlen(skb);
	idx = tx->req & tx->mask;
	tx->info[idx].skb = skb;
	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	dma_unmap_addr_set(&tx->info[idx], bus, bus);
	dma_unmap_len_set(&tx->info[idx], len, len);

	frag_cnt = skb_shinfo(skb)->nr_frags;
	frag_idx = 0;
	count = 0;
	rdma_count = 0;

	/* "rdma_count" is the number of RDMAs belonging to the
	 * current packet BEFORE the current send request. For
	 * non-TSO packets, this is equal to "count".
	 * For TSO packets, rdma_count needs to be reset
	 * to 0 after a segment cut.
	 *
	 * The rdma_count field of the send request is
	 * the number of RDMAs of the packet starting at
	 * that request. For TSO send requests with one or more cuts
	 * in the middle, this is the number of RDMAs starting
	 * after the last cut in the request. All previous
	 * segments before the last cut implicitly have 1 RDMA.
	 *
	 * Since the number of RDMAs is not known beforehand,
	 * it must be filled-in retroactively - after each
	 * segmentation cut or at the end of the entire packet.
	 */
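	/*
	 * Worked illustration (hypothetical numbers): if a TSO frame
	 * produces three send requests and a segment cut falls after
	 * the second, the store "(req - rdma_count)->rdma_count =
	 * rdma_count + 1" in the loop retroactively patches the
	 * request that opened the current run, and rdma_count is
	 * restarted at the cut; the final run is patched after the
	 * loop by "(req - rdma_count)->rdma_count = rdma_count".
	 */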
	while (1) {
		/* Break the SKB or Fragment up into pieces which
		 * do not cross mgp->tx_boundary */
		low = MYRI10GE_LOWPART_TO_U32(bus);
		high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
		while (len) {
			u8 flags_next;
			int cum_len_next;

			if (unlikely(count == max_segments))
				goto abort_linearize;

			boundary =
			    (low + mgp->tx_boundary) & ~(mgp->tx_boundary - 1);
			seglen = boundary - low;
			if (seglen > len)
				seglen = len;
			flags_next = flags & ~MXGEFW_FLAGS_FIRST;
			cum_len_next = cum_len + seglen;
			if (mss) {	/* TSO */
				(req - rdma_count)->rdma_count = rdma_count + 1;

				if (likely(cum_len >= 0)) {	/* payload */
					int next_is_first, chop;

					chop = (cum_len_next > mss);
					cum_len_next = cum_len_next % mss;
					next_is_first = (cum_len_next == 0);
					flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
					flags_next |= next_is_first *
					    MXGEFW_FLAGS_FIRST;
					rdma_count |= -(chop | next_is_first);
					rdma_count += chop & !next_is_first;
				} else if (likely(cum_len_next >= 0)) {	/* header ends */
					int small;

					rdma_count = -1;
					cum_len_next = 0;
					seglen = -cum_len;
					small = (mss <= MXGEFW_SEND_SMALL_SIZE);
					flags_next = MXGEFW_FLAGS_TSO_PLD |
					    MXGEFW_FLAGS_FIRST |
					    (small * MXGEFW_FLAGS_SMALL);
				}
			}
			req->addr_high = high_swapped;
			req->addr_low = htonl(low);
			req->pseudo_hdr_offset = htons(pseudo_hdr_offset);
			req->pad = 0;	/* complete solid 16-byte block; does this matter? */
			req->rdma_count = 1;
			req->length = htons(seglen);
			req->cksum_offset = cksum_offset;
			req->flags = flags | ((cum_len & 1) * odd_flag);

			low += seglen;
			len -= seglen;
			cum_len = cum_len_next;
			flags = flags_next;
			req++;
			count++;
			rdma_count++;
			if (cksum_offset != 0 && !(mss && skb_is_gso_v6(skb))) {
				if (unlikely(cksum_offset > seglen))
					cksum_offset -= seglen;
				else
					cksum_offset = 0;
			}
		}
		if (frag_idx == frag_cnt)
			break;

		/* map next fragment for DMA */
		idx = (count + tx->req) & tx->mask;
		frag = &skb_shinfo(skb)->frags[frag_idx];
		frag_idx++;
		len = frag->size;
		bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
				   len, PCI_DMA_TODEVICE);
		dma_unmap_addr_set(&tx->info[idx], bus, bus);
		dma_unmap_len_set(&tx->info[idx], len, len);
	}

	(req - rdma_count)->rdma_count = rdma_count;
	if (mss)
		do {
			req--;
			req->flags |= MXGEFW_FLAGS_TSO_LAST;
		} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
					 MXGEFW_FLAGS_FIRST)));
	idx = ((count - 1) + tx->req) & tx->mask;
	tx->info[idx].last = 1;
	myri10ge_submit_req(tx, tx->req_list, count);
	/* if using multiple tx queues, make sure NIC polls the
	 * new tx descriptors */
	if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
		tx->queue_active = 1;
		put_be32(htonl(1), tx->send_go);
		mb();
		mmiowb();
	}
	tx->pkt_start++;
	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
		tx->stop_queue++;
		netif_tx_stop_queue(netdev_queue);
	}
	return NETDEV_TX_OK;

abort_linearize:
	/* Free any DMA resources we've alloced and clear out the skb
	 * slot so as to not trip up assertions, and to avoid a
	 * double-free if linearizing fails */

	last_idx = (idx + 1) & tx->mask;
	idx = tx->req & tx->mask;
	tx->info[idx].skb = NULL;
	do {
		len = dma_unmap_len(&tx->info[idx], len);
		if (len) {
			if (tx->info[idx].skb != NULL)
				pci_unmap_single(mgp->pdev,
						 dma_unmap_addr(&tx->info[idx],
								bus), len,
						 PCI_DMA_TODEVICE);
			else
				pci_unmap_page(mgp->pdev,
					       dma_unmap_addr(&tx->info[idx],
							      bus), len,
					       PCI_DMA_TODEVICE);
			dma_unmap_len_set(&tx->info[idx], len, 0);
			tx->info[idx].skb = NULL;
		}
		idx = (idx + 1) & tx->mask;
	} while (idx != last_idx);
	if (skb_is_gso(skb)) {
		netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
		goto drop;
	}

	if (skb_linearize(skb))
		goto drop;

	tx->linearized++;
	goto again;

drop:
	dev_kfree_skb_any(skb);
	ss->stats.tx_dropped += 1;
	return NETDEV_TX_OK;
}
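/*
 * Software TSO fallback: when an IPv6 TSO header is longer than the
 * firmware can parse (mgp->max_tso6), myri10ge_xmit() punts here; the
 * skb is segmented in software with skb_gso_segment() and each
 * resulting frame is fed back through myri10ge_xmit() individually.
 */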
static netdev_tx_t myri10ge_sw_tso(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct sk_buff *segs, *curr;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_state *ss;
	netdev_tx_t status;

	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
	if (IS_ERR(segs))
		goto drop;

	while (segs) {
		curr = segs;
		segs = segs->next;
		curr->next = NULL;
		status = myri10ge_xmit(curr, dev);
		if (status != 0) {
			dev_kfree_skb_any(curr);
			if (segs != NULL) {
				curr = segs;
				segs = segs->next;
				curr->next = NULL;
				dev_kfree_skb_any(segs);
			}
			goto drop;
		}
	}
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

drop:
	ss = &mgp->ss[skb_get_queue_mapping(skb)];
	dev_kfree_skb_any(skb);
	ss->stats.tx_dropped += 1;
	return NETDEV_TX_OK;
}
static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_slice_netstats *slice_stats;
	struct net_device_stats *stats = &dev->stats;
	int i;

	spin_lock(&mgp->stats_lock);
	memset(stats, 0, sizeof(*stats));
	for (i = 0; i < mgp->num_slices; i++) {
		slice_stats = &mgp->ss[i].stats;
		stats->rx_packets += slice_stats->rx_packets;
		stats->tx_packets += slice_stats->tx_packets;
		stats->rx_bytes += slice_stats->rx_bytes;
		stats->tx_bytes += slice_stats->tx_bytes;
		stats->rx_dropped += slice_stats->rx_dropped;
		stats->tx_dropped += slice_stats->tx_dropped;
	}
	spin_unlock(&mgp->stats_lock);
	return stats;
}
static void myri10ge_set_multicast_list(struct net_device *dev)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	struct myri10ge_cmd cmd;
	struct netdev_hw_addr *ha;
	__be32 data[2] = { 0, 0 };
	int err;

	/* can be called from atomic contexts,
	 * pass 1 to force atomicity in myri10ge_send_cmd() */
	myri10ge_change_promisc(mgp, dev->flags & IFF_PROMISC, 1);

	/* This firmware is known to not support multicast */
	if (!mgp->fw_multicast_support)
		return;

	/* Disable multicast filtering */

	err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
			   err);
		goto abort;
	}

	if ((dev->flags & IFF_ALLMULTI) || mgp->adopted_rx_filter_bug) {
		/* request to disable multicast filtering, so quit here */
		return;
	}

	/* Flush the filters */

	err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
				&cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
			   err);
		goto abort;
	}

	/* Walk the multicast list, and add each address */
	netdev_for_each_mc_addr(ha, dev) {
		memcpy(data, &ha->addr, 6);
		cmd.data0 = ntohl(data[0]);
		cmd.data1 = ntohl(data[1]);
		err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
					&cmd, 1);

		if (err != 0) {
			netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
				   err, ha->addr);
			goto abort;
		}
	}
	/* Enable multicast filtering */
	err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
	if (err != 0) {
		netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
			   err);
		goto abort;
	}

	return;

abort:
	return;
}
static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	struct myri10ge_priv *mgp = netdev_priv(dev);
	int status;

	if (!is_valid_ether_addr(sa->sa_data))
		return -EADDRNOTAVAIL;

	status = myri10ge_update_mac_address(mgp, sa->sa_data);
	if (status != 0) {
		netdev_err(dev, "changing mac address failed with %d\n",
			   status);
		return status;
	}

	/* change the dev structure */
	memcpy(dev->dev_addr, sa->sa_data, 6);
	return 0;
}
static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
{
	struct myri10ge_priv *mgp = netdev_priv(dev);
	int error = 0;

	if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
		netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
		return -EINVAL;
	}
	netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
	if (mgp->running) {
		/* if we change the mtu on an active device, we must
		 * reset the device so the firmware sees the change */
		myri10ge_close(dev);
		dev->mtu = new_mtu;
		myri10ge_open(dev);
	} else
		dev->mtu = new_mtu;

	return error;
}
/*
 * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
 * Only do it if the bridge is a root port since we don't want to disturb
 * any other device, except if forced with myri10ge_ecrc_enable > 1.
 */

static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
{
	struct pci_dev *bridge = mgp->pdev->bus->self;
	struct device *dev = &mgp->pdev->dev;
	int cap;
	unsigned err_cap;
	u16 val;
	u8 ext_type;
	int ret;

	if (!myri10ge_ecrc_enable || !bridge)
		return;

	/* check that the bridge is a root port */
	cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
	pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
	ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
	if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
		if (myri10ge_ecrc_enable > 1) {
			struct pci_dev *prev_bridge, *old_bridge = bridge;

			/* Walk the hierarchy up to the root port
			 * where ECRC has to be enabled */
			do {
				prev_bridge = bridge;
				bridge = bridge->bus->self;
				if (!bridge || prev_bridge == bridge) {
					dev_err(dev,
						"Failed to find root port"
						" to force ECRC\n");
					return;
				}
				cap =
				    pci_find_capability(bridge, PCI_CAP_ID_EXP);
				pci_read_config_word(bridge,
						     cap + PCI_CAP_FLAGS, &val);
				ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
			} while (ext_type != PCI_EXP_TYPE_ROOT_PORT);

			dev_info(dev,
				 "Forcing ECRC on non-root port %s"
				 " (enabling on root port %s)\n",
				 pci_name(old_bridge), pci_name(bridge));
		} else {
			dev_err(dev,
				"Not enabling ECRC on non-root port %s\n",
				pci_name(bridge));
			return;
		}
	}

	cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
	if (!cap)
		return;

	ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
	if (ret) {
		dev_err(dev, "failed reading ext-conf-space of %s\n",
			pci_name(bridge));
		dev_err(dev, "\t pci=nommconf in use? "
			"or buggy/incomplete/absent ACPI MCFG attr?\n");
		return;
	}
	if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
		return;

	err_cap |= PCI_ERR_CAP_ECRC_GENE;
	pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
	dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
}
/*
 * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
 * when the PCI-E Completion packets are aligned on an 8-byte
 * boundary.  Some PCI-E chip sets always align Completion packets; on
 * the ones that do not, the alignment can be enforced by enabling
 * ECRC generation (if supported).
 *
 * When PCI-E Completion packets are not aligned, it is actually more
 * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
 *
 * If the driver can neither enable ECRC nor verify that it has
 * already been enabled, then it must use a firmware image which works
 * around unaligned completion packets (myri10ge_rss_ethp_z8e.dat), and it
 * should also ensure that it never gives the device a Read-DMA which is
 * larger than 2KB by setting the tx_boundary to 2KB.  If ECRC is
 * enabled, then the driver should use the aligned (myri10ge_rss_eth_z8e.dat)
 * firmware image, and set tx_boundary to 4KB.
 */

static void myri10ge_firmware_probe(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	struct device *dev = &pdev->dev;
	int status;

	mgp->tx_boundary = 4096;
	/*
	 * Verify the max read request size was set to 4KB
	 * before trying the test with 4KB.
	 */
	status = pcie_get_readrq(pdev);
	if (status < 0) {
		dev_err(dev, "Couldn't read max read req size: %d\n", status);
		goto abort;
	}
	if (status != 4096) {
		dev_warn(dev, "Max Read Request size != 4096 (%d)\n", status);
		mgp->tx_boundary = 2048;
	}
	/*
	 * load the optimized firmware (which assumes aligned PCIe
	 * completions) in order to see if it works on this host.
	 */
	set_fw_name(mgp, myri10ge_fw_aligned, false);
	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0)
		goto abort;

	/*
	 * Enable ECRC if possible
	 */
	myri10ge_enable_ecrc(mgp);

	/*
	 * Run a DMA test which watches for unaligned completions and
	 * aborts on the first one seen.
	 */

	status = myri10ge_dma_test(mgp, MXGEFW_CMD_UNALIGNED_TEST);
	if (status == 0)
		return;		/* keep the aligned firmware */

	if (status != -E2BIG)
		dev_warn(dev, "DMA test failed: %d\n", status);
	if (status == -ENOSYS)
		dev_warn(dev, "Falling back to ethp! "
			 "Please install up to date fw\n");
abort:
	/* fall back to using the unaligned firmware */
	mgp->tx_boundary = 2048;
	set_fw_name(mgp, myri10ge_fw_unaligned, false);
}
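/*
 * Firmware selection precedence in myri10ge_select_firmware() below:
 * an explicit myri10ge_force_firmware setting wins; otherwise links
 * narrower than x8 are assumed to deliver aligned completions and get
 * the fast firmware, while x8 links are probed with the DMA test
 * above.  A firmware named via the module parameter or the per-board
 * myri10ge_fw_names table overrides whatever was chosen.
 */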
static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
{
	int overridden = 0;

	if (myri10ge_force_firmware == 0) {
		int link_width, exp_cap;
		u16 lnk;

		exp_cap = pci_find_capability(mgp->pdev, PCI_CAP_ID_EXP);
		pci_read_config_word(mgp->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
		link_width = (lnk >> 4) & 0x3f;

		/* Check to see if Link is less than 8 or if the
		 * upstream bridge is known to provide aligned
		 * completions */
		if (link_width < 8) {
			dev_info(&mgp->pdev->dev, "PCIE x%d Link\n",
				 link_width);
			mgp->tx_boundary = 4096;
			set_fw_name(mgp, myri10ge_fw_aligned, false);
		} else {
			myri10ge_firmware_probe(mgp);
		}
	} else {
		if (myri10ge_force_firmware == 1) {
			dev_info(&mgp->pdev->dev,
				 "Assuming aligned completions (forced)\n");
			mgp->tx_boundary = 4096;
			set_fw_name(mgp, myri10ge_fw_aligned, false);
		} else {
			dev_info(&mgp->pdev->dev,
				 "Assuming unaligned completions (forced)\n");
			mgp->tx_boundary = 2048;
			set_fw_name(mgp, myri10ge_fw_unaligned, false);
		}
	}

	kparam_block_sysfs_write(myri10ge_fw_name);
	if (myri10ge_fw_name != NULL) {
		char *fw_name = kstrdup(myri10ge_fw_name, GFP_KERNEL);
		if (fw_name) {
			overridden = 1;
			set_fw_name(mgp, fw_name, true);
		}
	}
	kparam_unblock_sysfs_write(myri10ge_fw_name);

	if (mgp->board_number < MYRI10GE_MAX_BOARDS &&
	    myri10ge_fw_names[mgp->board_number] != NULL &&
	    strlen(myri10ge_fw_names[mgp->board_number])) {
		set_fw_name(mgp, myri10ge_fw_names[mgp->board_number], false);
		overridden = 1;
	}
	if (overridden)
		dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
			 mgp->fw_name);
}
#ifdef CONFIG_PM
static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return -EINVAL;
	netdev = mgp->dev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		netdev_info(netdev, "closing\n");
		rtnl_lock();
		myri10ge_close(netdev);
		rtnl_unlock();
	}
	myri10ge_dummy_rdma(mgp, 0);
	pci_save_state(pdev);
	pci_disable_device(pdev);

	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}

static int myri10ge_resume(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;
	int status;
	u16 vendor;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return -EINVAL;
	netdev = mgp->dev;
	pci_set_power_state(pdev, 0);	/* zeros conf space as a side effect */
	msleep(5);		/* give card time to respond */
	pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
	if (vendor == 0xffff) {
		netdev_err(mgp->dev, "device disappeared!\n");
		return -EIO;
	}

	status = pci_restore_state(pdev);
	if (status)
		return status;

	status = pci_enable_device(pdev);
	if (status) {
		dev_err(&pdev->dev, "failed to enable device\n");
		return status;
	}

	pci_set_master(pdev);

	myri10ge_reset(mgp);
	myri10ge_dummy_rdma(mgp, 1);

	/* Save configuration space to be restored if the
	 * nic resets due to a parity error */
	pci_save_state(pdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		status = myri10ge_open(netdev);
		rtnl_unlock();
		if (status != 0)
			goto abort_with_enabled;

	}
	netif_device_attach(netdev);

	return 0;

abort_with_enabled:
	pci_disable_device(pdev);
	return -EIO;

}
#endif /* CONFIG_PM */
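/*
 * The reboot status is fetched through the vendor-specific capability
 * window in PCI config space rather than through BAR0, presumably
 * because config reads still work after the NIC has rebooted and lost
 * its bus-master setup (the watchdog below reads it exactly when
 * PCI_COMMAND_MASTER has been found cleared).
 */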
static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
{
	struct pci_dev *pdev = mgp->pdev;
	int vs = mgp->vendor_specific_offset;
	u32 reboot;

	/*enter read32 mode */
	pci_write_config_byte(pdev, vs + 0x10, 0x3);

	/*read REBOOT_STATUS (0xfffffff0) */
	pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
	pci_read_config_dword(pdev, vs + 0x14, &reboot);
	return reboot;
}
/*
 * This watchdog is used to check whether the board has suffered
 * from a parity error and needs to be recovered.
 */
static void myri10ge_watchdog(struct work_struct *work)
{
	struct myri10ge_priv *mgp =
	    container_of(work, struct myri10ge_priv, watchdog_work);
	struct myri10ge_tx_buf *tx;
	u32 reboot;
	int status, rebooted;
	int i;
	u16 cmd, vendor;

	mgp->watchdog_resets++;
	pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
	rebooted = 0;
	if ((cmd & PCI_COMMAND_MASTER) == 0) {
		/* Bus master DMA disabled?  Check to see
		 * if the card rebooted due to a parity error
		 * For now, just report it */
		reboot = myri10ge_read_reboot(mgp);
		netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
			   reboot,
			   myri10ge_reset_recover ? "" : " not");
		if (myri10ge_reset_recover == 0)
			return;
		rtnl_lock();
		mgp->rebooted = 1;
		rebooted = 1;
		myri10ge_close(mgp->dev);
		myri10ge_reset_recover--;
		mgp->rebooted = 0;
		/*
		 * A rebooted nic will come back with config space as
		 * it was after power was applied to PCIe bus.
		 * Attempt to restore config space which was saved
		 * when the driver was loaded, or the last time the
		 * nic was resumed from power saving mode.
		 */
		pci_restore_state(mgp->pdev);

		/* save state again for accounting reasons */
		pci_save_state(mgp->pdev);

	} else {
		/* if we get back -1's from our slot, perhaps somebody
		 * powered off our card.  Don't try to reset it in
		 * this case */
		if (cmd == 0xffff) {
			pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
			if (vendor == 0xffff) {
				netdev_err(mgp->dev, "device disappeared!\n");
				return;
			}
		}
		/* Perhaps it is a software error.  Try to reset */

		netdev_err(mgp->dev, "device timeout, resetting\n");
		for (i = 0; i < mgp->num_slices; i++) {
			tx = &mgp->ss[i].tx;
			netdev_err(mgp->dev, "(%d): %d %d %d %d %d %d\n",
				   i, tx->queue_active, tx->req,
				   tx->done, tx->pkt_start, tx->pkt_done,
				   (int)ntohl(mgp->ss[i].fw_stats->
					      send_done_count));
			msleep(2000);
			netdev_info(mgp->dev, "(%d): %d %d %d %d %d %d\n",
				    i, tx->queue_active, tx->req,
				    tx->done, tx->pkt_start, tx->pkt_done,
				    (int)ntohl(mgp->ss[i].fw_stats->
					       send_done_count));
		}
	}

	if (!rebooted) {
		rtnl_lock();
		myri10ge_close(mgp->dev);
	}
	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0)
		netdev_err(mgp->dev, "failed to load firmware\n");
	else
		myri10ge_open(mgp->dev);
	rtnl_unlock();
}
/*
 * We use our own timer routine rather than relying upon
 * netdev->tx_timeout because we have a very large hardware transmit
 * queue.  Due to the large queue, the netdev->tx_timeout function
 * cannot detect a NIC with a parity error in a timely fashion if the
 * NIC is lightly loaded.
 */
static void myri10ge_watchdog_timer(unsigned long arg)
{
	struct myri10ge_priv *mgp;
	struct myri10ge_slice_state *ss;
	int i, reset_needed, busy_slice_cnt;
	u32 rx_pause_cnt;
	u16 cmd;

	mgp = (struct myri10ge_priv *)arg;

	rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
	busy_slice_cnt = 0;
	for (i = 0, reset_needed = 0;
	     i < mgp->num_slices && reset_needed == 0; ++i) {

		ss = &mgp->ss[i];
		if (ss->rx_small.watchdog_needed) {
			myri10ge_alloc_rx_pages(mgp, &ss->rx_small,
						mgp->small_bytes + MXGEFW_PAD,
						1);
			if (ss->rx_small.fill_cnt - ss->rx_small.cnt >=
			    myri10ge_fill_thresh)
				ss->rx_small.watchdog_needed = 0;
		}
		if (ss->rx_big.watchdog_needed) {
			myri10ge_alloc_rx_pages(mgp, &ss->rx_big,
						mgp->big_bytes, 1);
			if (ss->rx_big.fill_cnt - ss->rx_big.cnt >=
			    myri10ge_fill_thresh)
				ss->rx_big.watchdog_needed = 0;
		}

		if (ss->tx.req != ss->tx.done &&
		    ss->tx.done == ss->watchdog_tx_done &&
		    ss->watchdog_tx_req != ss->watchdog_tx_done) {
			/* nic seems like it might be stuck.. */
			if (rx_pause_cnt != mgp->watchdog_pause) {
				if (net_ratelimit())
					netdev_err(mgp->dev, "slice %d: TX paused, check link partner\n",
						   i);
			} else {
				netdev_warn(mgp->dev, "slice %d stuck:", i);
				reset_needed = 1;
			}
		}
		if (ss->watchdog_tx_done != ss->tx.done ||
		    ss->watchdog_rx_done != ss->rx_done.cnt) {
			busy_slice_cnt++;
		}
		ss->watchdog_tx_done = ss->tx.done;
		ss->watchdog_tx_req = ss->tx.req;
		ss->watchdog_rx_done = ss->rx_done.cnt;
	}
	/* if we've sent or received no traffic, poll the NIC to
	 * ensure it is still there.  Otherwise, we risk not noticing
	 * an error in a timely fashion */
	if (busy_slice_cnt == 0) {
		pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
		if ((cmd & PCI_COMMAND_MASTER) == 0) {
			reset_needed = 1;
		}
	}
	mgp->watchdog_pause = rx_pause_cnt;

	if (reset_needed) {
		schedule_work(&mgp->watchdog_work);
	} else {
		/* rearm timer */
		mod_timer(&mgp->watchdog_timer,
			  jiffies + myri10ge_watchdog_timeout * HZ);
	}
}
static void myri10ge_free_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss;
	struct pci_dev *pdev = mgp->pdev;
	size_t bytes;
	int i;

	if (mgp->ss == NULL)
		return;

	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		if (ss->rx_done.entry != NULL) {
			bytes = mgp->max_intr_slots *
			    sizeof(*ss->rx_done.entry);
			dma_free_coherent(&pdev->dev, bytes,
					  ss->rx_done.entry, ss->rx_done.bus);
			ss->rx_done.entry = NULL;
		}
		if (ss->fw_stats != NULL) {
			bytes = sizeof(*ss->fw_stats);
			dma_free_coherent(&pdev->dev, bytes,
					  ss->fw_stats, ss->fw_stats_bus);
			ss->fw_stats = NULL;
		}
		netif_napi_del(&ss->napi);
	}
	kfree(mgp->ss);
	mgp->ss = NULL;
}
static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_slice_state *ss;
	struct pci_dev *pdev = mgp->pdev;
	size_t bytes;
	int i;

	bytes = sizeof(*mgp->ss) * mgp->num_slices;
	mgp->ss = kzalloc(bytes, GFP_KERNEL);
	if (mgp->ss == NULL) {
		return -ENOMEM;
	}

	for (i = 0; i < mgp->num_slices; i++) {
		ss = &mgp->ss[i];
		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
		ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
						       &ss->rx_done.bus,
						       GFP_KERNEL);
		if (ss->rx_done.entry == NULL)
			goto abort;
		memset(ss->rx_done.entry, 0, bytes);
		bytes = sizeof(*ss->fw_stats);
		ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
						  &ss->fw_stats_bus,
						  GFP_KERNEL);
		if (ss->fw_stats == NULL)
			goto abort;
		ss->mgp = mgp;
		ss->dev = mgp->dev;
		netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
			       myri10ge_napi_weight);
	}
	return 0;
abort:
	myri10ge_free_slices(mgp);
	return -ENOMEM;
}
/*
 * This function determines the number of slices supported.
 * The number of slices is the minimum of the number of CPUs,
 * the number of MSI-X irqs supported, and the number of slices
 * supported by the firmware.
 */
static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
{
	struct myri10ge_cmd cmd;
	struct pci_dev *pdev = mgp->pdev;
	char *old_fw;
	bool old_allocated;
	int i, status, ncpus, msix_cap;

	mgp->num_slices = 1;
	msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	ncpus = num_online_cpus();

	if (myri10ge_max_slices == 1 || msix_cap == 0 ||
	    (myri10ge_max_slices == -1 && ncpus < 2))
		return;

	/* try to load the slice aware rss firmware */
	old_fw = mgp->fw_name;
	old_allocated = mgp->fw_name_allocated;
	/* don't free old_fw if we override it. */
	mgp->fw_name_allocated = false;

	if (myri10ge_fw_name != NULL) {
		dev_info(&mgp->pdev->dev, "overriding rss firmware to %s\n",
			 myri10ge_fw_name);
		set_fw_name(mgp, myri10ge_fw_name, false);
	} else if (old_fw == myri10ge_fw_aligned)
		set_fw_name(mgp, myri10ge_fw_rss_aligned, false);
	else
		set_fw_name(mgp, myri10ge_fw_rss_unaligned, false);
	status = myri10ge_load_firmware(mgp, 0);
	if (status != 0) {
		dev_info(&pdev->dev, "Rss firmware not found\n");
		if (old_allocated)
			kfree(old_fw);
		return;
	}

	/* hit the board with a reset to ensure it is alive */
	memset(&cmd, 0, sizeof(cmd));
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed reset\n");
		goto abort_with_fw;
	}

	mgp->max_intr_slots = cmd.data0 / sizeof(struct mcp_slot);

	/* tell it the size of the interrupt queues */
	cmd.data0 = mgp->max_intr_slots * sizeof(struct mcp_slot);
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
	if (status != 0) {
		dev_err(&mgp->pdev->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n");
		goto abort_with_fw;
	}

	/* ask the maximum number of slices it supports */
	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd, 0);
	if (status != 0)
		goto abort_with_fw;
	else
		mgp->num_slices = cmd.data0;

	/* Only allow multiple slices if MSI-X is usable */
	if (!myri10ge_msi) {
		goto abort_with_fw;
	}

	/* if the admin did not specify a limit to how many
	 * slices we should use, cap it automatically to the
	 * number of CPUs currently online */
	if (myri10ge_max_slices == -1)
		myri10ge_max_slices = ncpus;

	if (mgp->num_slices > myri10ge_max_slices)
		mgp->num_slices = myri10ge_max_slices;

	/* Now try to allocate as many MSI-X vectors as we have
	 * slices. We give up on MSI-X if we can only get a single
	 * vector. */

	mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
				    GFP_KERNEL);
	if (mgp->msix_vectors == NULL)
		goto disable_msix;
	for (i = 0; i < mgp->num_slices; i++) {
		mgp->msix_vectors[i].entry = i;
	}

	while (mgp->num_slices > 1) {
		/* make sure it is a power of two */
		while (!is_power_of_2(mgp->num_slices))
			mgp->num_slices--;
		if (mgp->num_slices == 1)
			goto disable_msix;
		status = pci_enable_msix(pdev, mgp->msix_vectors,
					 mgp->num_slices);
		if (status == 0) {
			pci_disable_msix(pdev);
			if (old_allocated)
				kfree(old_fw);
			return;
		}
		if (status > 0)
			mgp->num_slices = status;
		else
			goto disable_msix;
	}

disable_msix:
	if (mgp->msix_vectors != NULL) {
		kfree(mgp->msix_vectors);
		mgp->msix_vectors = NULL;
	}

abort_with_fw:
	mgp->num_slices = 1;
	set_fw_name(mgp, old_fw, old_allocated);
	myri10ge_load_firmware(mgp, 0);
}
static const struct net_device_ops myri10ge_netdev_ops = {
	.ndo_open		= myri10ge_open,
	.ndo_stop		= myri10ge_close,
	.ndo_start_xmit		= myri10ge_xmit,
	.ndo_get_stats		= myri10ge_get_stats,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= myri10ge_change_mtu,
	.ndo_set_multicast_list	= myri10ge_set_multicast_list,
	.ndo_set_mac_address	= myri10ge_set_mac_address,
};
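/*
 * Probe flow below: enable the PCI device, set a 4KB max read
 * request, map the board's SRAM, read the MAC address from the
 * EEPROM strings, select and load firmware, size the slices, and
 * only then register the net_device.  Config space is saved so the
 * watchdog can restore it after a parity-induced NIC reboot.
 */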
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct myri10ge_priv *mgp;
	struct device *dev = &pdev->dev;
	int i;
	int status = -ENXIO;
	int dac_enabled;
	unsigned hdr_offset, ss_offset;
	static int board_number;

	netdev = alloc_etherdev_mq(sizeof(*mgp), MYRI10GE_MAX_SLICES);
	if (netdev == NULL) {
		dev_err(dev, "Could not allocate ethernet device\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	mgp = netdev_priv(netdev);
	mgp->dev = netdev;
	mgp->pdev = pdev;
	mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
	mgp->pause = myri10ge_flow_control;
	mgp->intr_coal_delay = myri10ge_intr_coal_delay;
	mgp->msg_enable = netif_msg_init(myri10ge_debug, MYRI10GE_MSG_DEFAULT);
	mgp->board_number = board_number;
	init_waitqueue_head(&mgp->down_wq);

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "pci_enable_device call failed\n");
		status = -ENODEV;
		goto abort_with_netdev;
	}

	/* Find the vendor-specific cap so we can check
	 * the reboot register later on */
	mgp->vendor_specific_offset
	    = pci_find_capability(pdev, PCI_CAP_ID_VNDR);

	/* Set our max read request to 4KB */
	status = pcie_set_readrq(pdev, 4096);
	if (status != 0) {
		dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
			status);
		goto abort_with_enabled;
	}

	pci_set_master(pdev);
	dac_enabled = 1;
	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (status != 0) {
		dac_enabled = 0;
		dev_err(&pdev->dev,
			"64-bit pci address mask was refused, "
			"trying 32-bit\n");
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	}
	if (status != 0) {
		dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
		goto abort_with_enabled;
	}
	(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
				      &mgp->cmd_bus, GFP_KERNEL);
	if (mgp->cmd == NULL)
		goto abort_with_enabled;

	mgp->board_span = pci_resource_len(pdev, 0);
	mgp->iomem_base = pci_resource_start(pdev, 0);
	mgp->mtrr = -1;
	mgp->wc_enabled = 0;
#ifdef CONFIG_MTRR
	mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
			     MTRR_TYPE_WRCOMB, 1);
	if (mgp->mtrr >= 0)
		mgp->wc_enabled = 1;
#endif
	mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
	if (mgp->sram == NULL) {
		dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
			mgp->board_span, mgp->iomem_base);
		status = -ENXIO;
		goto abort_with_mtrr;
	}
	hdr_offset =
	    ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET)) & 0xffffc;
	ss_offset = hdr_offset + offsetof(struct mcp_gen_header, string_specs);
	mgp->sram_size = ntohl(__raw_readl(mgp->sram + ss_offset));
	if (mgp->sram_size > mgp->board_span ||
	    mgp->sram_size <= MYRI10GE_FW_OFFSET) {
		dev_err(&pdev->dev,
			"invalid sram_size %dB or board span %ldB\n",
			mgp->sram_size, mgp->board_span);
		goto abort_with_ioremap;
	}
	memcpy_fromio(mgp->eeprom_strings,
		      mgp->sram + mgp->sram_size, MYRI10GE_EEPROM_STRINGS_SIZE);
	memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
	status = myri10ge_read_mac_addr(mgp);
	if (status)
		goto abort_with_ioremap;

	for (i = 0; i < ETH_ALEN; i++)
		netdev->dev_addr[i] = mgp->mac_addr[i];

	myri10ge_select_firmware(mgp);

	status = myri10ge_load_firmware(mgp, 1);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to load firmware\n");
		goto abort_with_ioremap;
	}
	myri10ge_probe_slices(mgp);
	status = myri10ge_alloc_slices(mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed to alloc slice state\n");
		goto abort_with_firmware;
	}
	netif_set_real_num_tx_queues(netdev, mgp->num_slices);
	netif_set_real_num_rx_queues(netdev, mgp->num_slices);
	status = myri10ge_reset(mgp);
	if (status != 0) {
		dev_err(&pdev->dev, "failed reset\n");
		goto abort_with_slices;
	}
#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_setup_dca(mgp);
#endif
	pci_set_drvdata(pdev, mgp);
	if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
		myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
	if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
		myri10ge_initial_mtu = 68;

	netdev->netdev_ops = &myri10ge_netdev_ops;
	netdev->mtu = myri10ge_initial_mtu;
	netdev->base_addr = mgp->iomem_base;
	netdev->features = mgp->features;

	if (dac_enabled)
		netdev->features |= NETIF_F_HIGHDMA;
	netdev->features |= NETIF_F_LRO;

	netdev->vlan_features |= mgp->features;
	if (mgp->fw_ver_tiny < 37)
		netdev->vlan_features &= ~NETIF_F_TSO6;
	if (mgp->fw_ver_tiny < 32)
		netdev->vlan_features &= ~NETIF_F_TSO;

	/* make sure we can get an irq, and that MSI can be
	 * setup (if available).  Also ensure netdev->irq
	 * is set to correct value if MSI is enabled */
	status = myri10ge_request_irq(mgp);
	if (status != 0)
		goto abort_with_firmware;
	netdev->irq = pdev->irq;
	myri10ge_free_irq(mgp);

	/* Save configuration space to be restored if the
	 * nic resets due to a parity error */
	pci_save_state(pdev);

	/* Setup the watchdog timer */
	setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
		    (unsigned long)mgp);

	spin_lock_init(&mgp->stats_lock);
	SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
	status = register_netdev(netdev);
	if (status != 0) {
		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
		goto abort_with_state;
	}
	if (mgp->msix_enabled)
		dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
			 mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
			 (mgp->wc_enabled ? "Enabled" : "Disabled"));
	else
		dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
			 mgp->msi_enabled ? "MSI" : "xPIC",
			 netdev->irq, mgp->tx_boundary, mgp->fw_name,
			 (mgp->wc_enabled ? "Enabled" : "Disabled"));

	board_number++;
	return 0;

abort_with_state:
	pci_restore_state(pdev);

abort_with_slices:
	myri10ge_free_slices(mgp);

abort_with_firmware:
	myri10ge_dummy_rdma(mgp, 0);

abort_with_ioremap:
	if (mgp->mac_addr_string != NULL)
		dev_err(&pdev->dev,
			"myri10ge_probe() failed: MAC=%s, SN=%ld\n",
			mgp->mac_addr_string, mgp->serial_number);
	iounmap(mgp->sram);

abort_with_mtrr:
#ifdef CONFIG_MTRR
	if (mgp->mtrr >= 0)
		mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

abort_with_enabled:
	pci_disable_device(pdev);

abort_with_netdev:
	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	return status;
}
/*
 * myri10ge_remove
 *
 * Does what is necessary to shutdown one Myrinet device.  Called
 * once for each Myrinet card by the kernel when a module is
 * unloaded.
 */
static void myri10ge_remove(struct pci_dev *pdev)
{
	struct myri10ge_priv *mgp;
	struct net_device *netdev;

	mgp = pci_get_drvdata(pdev);
	if (mgp == NULL)
		return;

	cancel_work_sync(&mgp->watchdog_work);
	netdev = mgp->dev;
	unregister_netdev(netdev);

#ifdef CONFIG_MYRI10GE_DCA
	myri10ge_teardown_dca(mgp);
#endif
	myri10ge_dummy_rdma(mgp, 0);

	/* avoid a memory leak */
	pci_restore_state(pdev);

	iounmap(mgp->sram);

#ifdef CONFIG_MTRR
	if (mgp->mtrr >= 0)
		mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
#endif
	myri10ge_free_slices(mgp);
	if (mgp->msix_vectors != NULL)
		kfree(mgp->msix_vectors);
	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
			  mgp->cmd, mgp->cmd_bus);

	set_fw_name(mgp, NULL, false);
	free_netdev(netdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E	0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9	0x0009

static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
	{PCI_DEVICE
	 (PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
	{0},
};

MODULE_DEVICE_TABLE(pci, myri10ge_pci_tbl);

static struct pci_driver myri10ge_driver = {
	.name = "myri10ge",
	.probe = myri10ge_probe,
	.remove = myri10ge_remove,
	.id_table = myri10ge_pci_tbl,
#ifdef CONFIG_PM
	.suspend = myri10ge_suspend,
	.resume = myri10ge_resume,
#endif
};
#ifdef CONFIG_MYRI10GE_DCA
static int
myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p)
{
	int err = driver_for_each_device(&myri10ge_driver.driver,
					 NULL, &event,
					 myri10ge_notify_dca_device);

	if (err)
		return NOTIFY_BAD;
	return NOTIFY_DONE;
}

static struct notifier_block myri10ge_dca_notifier = {
	.notifier_call = myri10ge_notify_dca,
	.next = NULL,
	.priority = 0,
};
#endif /* CONFIG_MYRI10GE_DCA */
static __init int myri10ge_init_module(void)
{
	pr_info("Version %s\n", MYRI10GE_VERSION_STR);

	if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
		pr_err("Illegal rssh hash type %d, defaulting to source port\n",
		       myri10ge_rss_hash);
		myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
	}
#ifdef CONFIG_MYRI10GE_DCA
	dca_register_notify(&myri10ge_dca_notifier);
#endif
	if (myri10ge_max_slices > MYRI10GE_MAX_SLICES)
		myri10ge_max_slices = MYRI10GE_MAX_SLICES;

	return pci_register_driver(&myri10ge_driver);
}

module_init(myri10ge_init_module);

static __exit void myri10ge_cleanup_module(void)
{
#ifdef CONFIG_MYRI10GE_DCA
	dca_unregister_notify(&myri10ge_dca_notifier);
#endif
	pci_unregister_driver(&myri10ge_driver);
}

module_exit(myri10ge_cleanup_module);