#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/vmalloc.h>

#include "qlge.h"

static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
        "Loopback test (offline)"
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
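
/* QLGE_TEST_LEN counts the entries in ql_gstrings_test by dividing the
 * table's total size by the fixed ETH_GSTRING_LEN entry width; it is the
 * count ql_get_sset_count() reports for ETH_SS_TEST below.
 */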

static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
        int i, status = 0;
        struct rx_ring *rx_ring;
        struct cqicb *cqicb;

        if (!netif_running(qdev->ndev))
                return status;

        /* Skip the default queue, and update the outbound handler
         * queues if they changed.
         */
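        /* Ring layout assumed by the indexing below: entries
         * 0..rss_ring_count-1 of qdev->rx_ring are the inbound (RSS)
         * completion rings, and entries rss_ring_count..rx_ring_count-1
         * are the outbound (TX completion) handler queues.
         */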
        cqicb = (struct cqicb *)&qdev->rx_ring[qdev->rss_ring_count];
        if (le16_to_cpu(cqicb->irq_delay) != qdev->tx_coalesce_usecs ||
            le16_to_cpu(cqicb->pkt_delay) !=
            qdev->tx_max_coalesced_frames) {
                for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
                        rx_ring = &qdev->rx_ring[i];
                        cqicb = (struct cqicb *)rx_ring;
                        cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
                        cqicb->pkt_delay =
                            cpu_to_le16(qdev->tx_max_coalesced_frames);
                        cqicb->flags = FLAGS_LI;
                        status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
                                              CFG_LCQ, rx_ring->cq_id);
                        if (status) {
                                netif_err(qdev, ifup, qdev->ndev,
                                          "Failed to load CQICB.\n");
                                goto exit;
                        }
                }
        }

        /* Update the inbound (RSS) handler queues if they changed. */
        cqicb = (struct cqicb *)&qdev->rx_ring[0];
        if (le16_to_cpu(cqicb->irq_delay) != qdev->rx_coalesce_usecs ||
            le16_to_cpu(cqicb->pkt_delay) !=
            qdev->rx_max_coalesced_frames) {
                for (i = 0; i < qdev->rss_ring_count; i++) {
                        rx_ring = &qdev->rx_ring[i];
                        cqicb = (struct cqicb *)rx_ring;
                        cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
                        cqicb->pkt_delay =
                            cpu_to_le16(qdev->rx_max_coalesced_frames);
                        cqicb->flags = FLAGS_LI;
                        status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
                                              CFG_LCQ, rx_ring->cq_id);
                        if (status) {
                                netif_err(qdev, ifup, qdev->ndev,
                                          "Failed to load CQICB.\n");
                                goto exit;
                        }
                }
        }
exit:
        return status;
}

static void ql_update_stats(struct ql_adapter *qdev)
{
        u32 i;
        u64 data;
        u64 *iter = &qdev->nic_stats.tx_pkts;

        spin_lock(&qdev->stats_lock);
        if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
                netif_err(qdev, drv, qdev->ndev,
                          "Couldn't get xgmac sem.\n");
                goto quit;
        }
        /*
         * Get TX statistics.  The counters live in consecutive u64
         * registers, and struct nic_stats mirrors that layout, so iter
         * simply walks the struct as each register is read.
         */
        for (i = 0x200; i < 0x280; i += 8) {
                if (ql_read_xgmac_reg64(qdev, i, &data)) {
                        netif_err(qdev, drv, qdev->ndev,
                                  "Error reading status register 0x%.04x.\n",
                                  i);
                        goto end;
                } else
                        *iter = data;
                iter++;
        }

        /*
         * Get RX statistics.
         */
        for (i = 0x300; i < 0x3d0; i += 8) {
                if (ql_read_xgmac_reg64(qdev, i, &data)) {
                        netif_err(qdev, drv, qdev->ndev,
                                  "Error reading status register 0x%.04x.\n",
                                  i);
                        goto end;
                } else
                        *iter = data;
                iter++;
        }

        /*
         * Get Per-priority TX pause frame counter statistics.
         */
        for (i = 0x500; i < 0x540; i += 8) {
                if (ql_read_xgmac_reg64(qdev, i, &data)) {
                        netif_err(qdev, drv, qdev->ndev,
                                  "Error reading status register 0x%.04x.\n",
                                  i);
                        goto end;
                } else
                        *iter = data;
                iter++;
        }

        /*
         * Get Per-priority RX pause frame counter statistics.
         */
        for (i = 0x568; i < 0x5a8; i += 8) {
                if (ql_read_xgmac_reg64(qdev, i, &data)) {
                        netif_err(qdev, drv, qdev->ndev,
                                  "Error reading status register 0x%.04x.\n",
                                  i);
                        goto end;
                } else
                        *iter = data;
                iter++;
        }

        /*
         * Get RX NIC FIFO DROP statistics.
         */
        if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
                netif_err(qdev, drv, qdev->ndev,
                          "Error reading status register 0x%.04x.\n", 0x5b8);
                goto end;
        } else
                *iter = data;
end:
        ql_sem_unlock(qdev, qdev->xg_sem_mask);
quit:
        spin_unlock(&qdev->stats_lock);
}

static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
        {"tx_pkts"},
        {"tx_bytes"},
        {"tx_mcast_pkts"},
        {"tx_bcast_pkts"},
        {"tx_ucast_pkts"},
        {"tx_ctl_pkts"},
        {"tx_pause_pkts"},
        {"tx_64_pkts"},
        {"tx_65_to_127_pkts"},
        {"tx_128_to_255_pkts"},
        {"tx_256_511_pkts"},
        {"tx_512_to_1023_pkts"},
        {"tx_1024_to_1518_pkts"},
        {"tx_1519_to_max_pkts"},
        {"tx_undersize_pkts"},
        {"tx_oversize_pkts"},
        {"rx_bytes"},
        {"rx_bytes_ok"},
        {"rx_pkts"},
        {"rx_pkts_ok"},
        {"rx_bcast_pkts"},
        {"rx_mcast_pkts"},
        {"rx_ucast_pkts"},
        {"rx_undersize_pkts"},
        {"rx_oversize_pkts"},
        {"rx_jabber_pkts"},
        {"rx_undersize_fcerr_pkts"},
        {"rx_drop_events"},
        {"rx_fcerr_pkts"},
        {"rx_align_err"},
        {"rx_symbol_err"},
        {"rx_mac_err"},
        {"rx_ctl_pkts"},
        {"rx_pause_pkts"},
        {"rx_64_pkts"},
        {"rx_65_to_127_pkts"},
        {"rx_128_255_pkts"},
        {"rx_256_511_pkts"},
        {"rx_512_to_1023_pkts"},
        {"rx_1024_to_1518_pkts"},
        {"rx_1519_to_max_pkts"},
        {"rx_len_err_pkts"},
        {"tx_cbfc_pause_frames0"},
        {"tx_cbfc_pause_frames1"},
        {"tx_cbfc_pause_frames2"},
        {"tx_cbfc_pause_frames3"},
        {"tx_cbfc_pause_frames4"},
        {"tx_cbfc_pause_frames5"},
        {"tx_cbfc_pause_frames6"},
        {"tx_cbfc_pause_frames7"},
        {"rx_cbfc_pause_frames0"},
        {"rx_cbfc_pause_frames1"},
        {"rx_cbfc_pause_frames2"},
        {"rx_cbfc_pause_frames3"},
        {"rx_cbfc_pause_frames4"},
        {"rx_cbfc_pause_frames5"},
        {"rx_cbfc_pause_frames6"},
        {"rx_cbfc_pause_frames7"},
        {"rx_nic_fifo_drop"},
};

static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
        /* Only the ETH_SS_STATS string table is exported here. */
        memcpy(buf, ql_stats_str_arr, sizeof(ql_stats_str_arr));
}

static int ql_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_TEST:
                return QLGE_TEST_LEN;
        case ETH_SS_STATS:
                return ARRAY_SIZE(ql_stats_str_arr);
        default:
                return -EOPNOTSUPP;
        }
}

static void
ql_get_ethtool_stats(struct net_device *ndev,
                     struct ethtool_stats *stats, u64 *data)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        struct nic_stats *s = &qdev->nic_stats;

        ql_update_stats(qdev);

        *data++ = s->tx_pkts;
        *data++ = s->tx_bytes;
        *data++ = s->tx_mcast_pkts;
        *data++ = s->tx_bcast_pkts;
        *data++ = s->tx_ucast_pkts;
        *data++ = s->tx_ctl_pkts;
        *data++ = s->tx_pause_pkts;
        *data++ = s->tx_64_pkt;
        *data++ = s->tx_65_to_127_pkt;
        *data++ = s->tx_128_to_255_pkt;
        *data++ = s->tx_256_511_pkt;
        *data++ = s->tx_512_to_1023_pkt;
        *data++ = s->tx_1024_to_1518_pkt;
        *data++ = s->tx_1519_to_max_pkt;
        *data++ = s->tx_undersize_pkt;
        *data++ = s->tx_oversize_pkt;
        *data++ = s->rx_bytes;
        *data++ = s->rx_bytes_ok;
        *data++ = s->rx_pkts;
        *data++ = s->rx_pkts_ok;
        *data++ = s->rx_bcast_pkts;
        *data++ = s->rx_mcast_pkts;
        *data++ = s->rx_ucast_pkts;
        *data++ = s->rx_undersize_pkts;
        *data++ = s->rx_oversize_pkts;
        *data++ = s->rx_jabber_pkts;
        *data++ = s->rx_undersize_fcerr_pkts;
        *data++ = s->rx_drop_events;
        *data++ = s->rx_fcerr_pkts;
        *data++ = s->rx_align_err;
        *data++ = s->rx_symbol_err;
        *data++ = s->rx_mac_err;
        *data++ = s->rx_ctl_pkts;
        *data++ = s->rx_pause_pkts;
        *data++ = s->rx_64_pkts;
        *data++ = s->rx_65_to_127_pkts;
        *data++ = s->rx_128_255_pkts;
        *data++ = s->rx_256_511_pkts;
        *data++ = s->rx_512_to_1023_pkts;
        *data++ = s->rx_1024_to_1518_pkts;
        *data++ = s->rx_1519_to_max_pkts;
        *data++ = s->rx_len_err_pkts;
        *data++ = s->tx_cbfc_pause_frames0;
        *data++ = s->tx_cbfc_pause_frames1;
        *data++ = s->tx_cbfc_pause_frames2;
        *data++ = s->tx_cbfc_pause_frames3;
        *data++ = s->tx_cbfc_pause_frames4;
        *data++ = s->tx_cbfc_pause_frames5;
        *data++ = s->tx_cbfc_pause_frames6;
        *data++ = s->tx_cbfc_pause_frames7;
        *data++ = s->rx_cbfc_pause_frames0;
        *data++ = s->rx_cbfc_pause_frames1;
        *data++ = s->rx_cbfc_pause_frames2;
        *data++ = s->rx_cbfc_pause_frames3;
        *data++ = s->rx_cbfc_pause_frames4;
        *data++ = s->rx_cbfc_pause_frames5;
        *data++ = s->rx_cbfc_pause_frames6;
        *data++ = s->rx_cbfc_pause_frames7;
        *data++ = s->rx_nic_fifo_drop;
}
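
/* The copy order above must match ql_stats_str_arr positionally; the
 * ethtool core pairs each u64 with the string at the same index.
 */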

static int ql_get_settings(struct net_device *ndev,
                           struct ethtool_cmd *ecmd)
{
        struct ql_adapter *qdev = netdev_priv(ndev);

        ecmd->supported = SUPPORTED_10000baseT_Full;
        ecmd->advertising = ADVERTISED_10000baseT_Full;
        ecmd->autoneg = AUTONEG_ENABLE;
        ecmd->transceiver = XCVR_EXTERNAL;
        if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
            STS_LINK_TYPE_10GBASET) {
                ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
                ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
                ecmd->port = PORT_TP;
        } else {
                ecmd->supported |= SUPPORTED_FIBRE;
                ecmd->advertising |= ADVERTISED_FIBRE;
                ecmd->port = PORT_FIBRE;
        }

        ecmd->speed = SPEED_10000;
        ecmd->duplex = DUPLEX_FULL;

        return 0;
}

static void ql_get_drvinfo(struct net_device *ndev,
                           struct ethtool_drvinfo *drvinfo)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        strncpy(drvinfo->driver, qlge_driver_name, 32);
        strncpy(drvinfo->version, qlge_driver_version, 32);
        snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
                 (qdev->fw_rev_id & 0x00ff0000) >> 16,
                 (qdev->fw_rev_id & 0x0000ff00) >> 8,
                 (qdev->fw_rev_id & 0x000000ff));
        strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
        drvinfo->n_stats = 0;
        drvinfo->testinfo_len = 0;
        if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
                drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
        else
                drvinfo->regdump_len = sizeof(struct ql_reg_dump);
        drvinfo->eedump_len = 0;
}

static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        /* What we support. */
        wol->supported = WAKE_MAGIC;
        /* What we've currently got set. */
        wol->wolopts = qdev->wol;
}

static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        int status;

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
        qdev->wol = wol->wolopts;

        netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
        if (!qdev->wol) {
                /* A zeroed mode word asks the firmware to clear WOL. */
                u32 wol_mode = 0;

                status = ql_mb_wol_mode(qdev, wol_mode);
                netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
                          status == 0 ? "cleared successfully" : "clear failed",
                          wol_mode);
        }

        return 0;
}

static int ql_phys_id(struct net_device *ndev, u32 data)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        u32 led_reg, i;
        int status;

        /* Save the current LED settings */
        status = ql_mb_get_led_cfg(qdev);
        if (status)
                return status;
        led_reg = qdev->led_config;

        /* Start blinking the led */
        if (!data || data > 300)
                data = 300;

        for (i = 0; i < (data * 10); i++)
                ql_mb_set_led_cfg(qdev, QL_LED_BLINK);

        /* Restore LED settings */
        status = ql_mb_set_led_cfg(qdev, led_reg);
        if (status)
                return status;

        return 0;
}

static int ql_start_loopback(struct ql_adapter *qdev)
{
        if (netif_carrier_ok(qdev->ndev)) {
                set_bit(QL_LB_LINK_UP, &qdev->flags);
                netif_carrier_off(qdev->ndev);
        } else
                clear_bit(QL_LB_LINK_UP, &qdev->flags);
        qdev->link_config |= CFG_LOOPBACK_PCS;
        return ql_mb_set_port_cfg(qdev);
}
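
/* Carrier is forced off while the PCS loopback is configured so the stack
 * stops sending real traffic; the QL_LB_LINK_UP flag records whether
 * ql_stop_loopback() should restore carrier afterwards.
 */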

static void ql_stop_loopback(struct ql_adapter *qdev)
{
        qdev->link_config &= ~CFG_LOOPBACK_PCS;
        ql_mb_set_port_cfg(qdev);
        if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
                netif_carrier_on(qdev->ndev);
                clear_bit(QL_LB_LINK_UP, &qdev->flags);
        }
}

static void ql_create_lb_frame(struct sk_buff *skb,
                               unsigned int frame_size)
{
        memset(skb->data, 0xFF, frame_size);
        frame_size &= ~1;
        memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
        memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
        memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}
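
/* The receive path hands each looped-back frame to ql_check_lb_frame(),
 * which verifies byte 3 (still 0xFF from the fill above) plus the 0xBE and
 * 0xAF marker bytes before decrementing the outstanding-frame count.
 */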
void ql_check_lb_frame(struct ql_adapter *qdev,
                       struct sk_buff *skb)
{
        unsigned int frame_size = skb->len;

        if ((*(skb->data + 3) == 0xFF) &&
            (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
            (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
                atomic_dec(&qdev->lb_count);
                return;
        }
}

static int ql_run_loopback_test(struct ql_adapter *qdev)
{
        int i;
        netdev_tx_t rc;
        struct sk_buff *skb;
        unsigned int size = SMALL_BUF_MAP_SIZE;

        for (i = 0; i < 64; i++) {
                skb = netdev_alloc_skb(qdev->ndev, size);
                if (!skb)
                        return -ENOMEM;

                skb->queue_mapping = 0;
                skb_put(skb, size);
                ql_create_lb_frame(skb, size);
                rc = ql_lb_send(skb, qdev->ndev);
                if (rc != NETDEV_TX_OK)
                        return -EPIPE;
                atomic_inc(&qdev->lb_count);
        }
        /* Give queue time to settle before testing results. */
        msleep(2);
        ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
        return atomic_read(&qdev->lb_count) ? -EIO : 0;
}
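
/* Pass/fail is count-based: each frame sent increments lb_count and each
 * frame verified by ql_check_lb_frame() decrements it, so a nonzero count
 * after cleaning the RX ring means frames were lost or corrupted.
 */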

static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
{
        *data = ql_start_loopback(qdev);
        if (*data)
                goto out;
        *data = ql_run_loopback_test(qdev);
out:
        ql_stop_loopback(qdev);
        return *data;
}

static void ql_self_test(struct net_device *ndev,
                         struct ethtool_test *eth_test, u64 *data)
{
        struct ql_adapter *qdev = netdev_priv(ndev);

        if (netif_running(ndev)) {
                set_bit(QL_SELFTEST, &qdev->flags);
                if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
                        /* Offline tests */
                        if (ql_loopback_test(qdev, &data[0]))
                                eth_test->flags |= ETH_TEST_FL_FAILED;
                } else {
                        /* Online tests */
                        data[0] = 0;
                }
                clear_bit(QL_SELFTEST, &qdev->flags);
                /* Give link time to come up after
                 * port configuration changes.
                 */
                msleep_interruptible(4 * 1000);
        } else {
                netif_err(qdev, drv, qdev->ndev,
                          "is down, Loopback test will fail.\n");
                eth_test->flags |= ETH_TEST_FL_FAILED;
        }
}

static int ql_get_regs_len(struct net_device *ndev)
{
        struct ql_adapter *qdev = netdev_priv(ndev);

        if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
                return sizeof(struct ql_mpi_coredump);
        else
                return sizeof(struct ql_reg_dump);
}

static void ql_get_regs(struct net_device *ndev,
                        struct ethtool_regs *regs, void *p)
{
        struct ql_adapter *qdev = netdev_priv(ndev);

        ql_get_dump(qdev, p);
        qdev->core_is_dumped = 0;
        if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
                regs->len = sizeof(struct ql_mpi_coredump);
        else
                regs->len = sizeof(struct ql_reg_dump);
}

static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
        struct ql_adapter *qdev = netdev_priv(dev);

        c->rx_coalesce_usecs = qdev->rx_coalesce_usecs;
        c->tx_coalesce_usecs = qdev->tx_coalesce_usecs;

        /* This chip coalesces as follows:
         * If a packet arrives, hold off interrupts until
         * cqicb->int_delay expires, but if no other packets arrive don't
         * wait longer than cqicb->pkt_int_delay. But ethtool doesn't use a
         * timer to coalesce on a frame basis. So, we have to take ethtool's
         * max_coalesced_frames value and convert it to a delay in
         * microseconds. We do this by using a basic throughput of 1,000,000
         * frames per second @ (1024 bytes). This means one frame per usec.
         * So it's a simple one-to-one ratio.
         */
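        /* Worked example of that one-to-one mapping: an ethtool request of
         * rx_max_coalesced_frames = 12 is programmed into the CQICB as a
         * 12 usec pkt_delay, and reads back here unchanged.
         */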
        c->rx_max_coalesced_frames = qdev->rx_max_coalesced_frames;
        c->tx_max_coalesced_frames = qdev->tx_max_coalesced_frames;

        return 0;
}

static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
{
        struct ql_adapter *qdev = netdev_priv(ndev);

        /* Validate user parameters. */
        if (c->rx_coalesce_usecs > qdev->rx_ring_size / 2)
                return -EINVAL;
        /* Don't wait more than 10 usec. */
        if (c->rx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
                return -EINVAL;
        if (c->tx_coalesce_usecs > qdev->tx_ring_size / 2)
                return -EINVAL;
        if (c->tx_max_coalesced_frames > MAX_INTER_FRAME_WAIT)
                return -EINVAL;

        /* Verify a change took place before updating the hardware. */
        if (qdev->rx_coalesce_usecs == c->rx_coalesce_usecs &&
            qdev->tx_coalesce_usecs == c->tx_coalesce_usecs &&
            qdev->rx_max_coalesced_frames == c->rx_max_coalesced_frames &&
            qdev->tx_max_coalesced_frames == c->tx_max_coalesced_frames)
                return 0;

        qdev->rx_coalesce_usecs = c->rx_coalesce_usecs;
        qdev->tx_coalesce_usecs = c->tx_coalesce_usecs;
        qdev->rx_max_coalesced_frames = c->rx_max_coalesced_frames;
        qdev->tx_max_coalesced_frames = c->tx_max_coalesced_frames;

        return ql_update_ring_coalescing(qdev);
}

static void ql_get_pauseparam(struct net_device *netdev,
                              struct ethtool_pauseparam *pause)
{
        struct ql_adapter *qdev = netdev_priv(netdev);

        ql_mb_get_port_cfg(qdev);
        if (qdev->link_config & CFG_PAUSE_STD) {
                pause->rx_pause = 1;
                pause->tx_pause = 1;
        }
}

static int ql_set_pauseparam(struct net_device *netdev,
                             struct ethtool_pauseparam *pause)
{
        struct ql_adapter *qdev = netdev_priv(netdev);
        int status = 0;

        if ((pause->rx_pause) && (pause->tx_pause))
                qdev->link_config |= CFG_PAUSE_STD;
        else if (!pause->rx_pause && !pause->tx_pause)
                qdev->link_config &= ~CFG_PAUSE_STD;
        else
                return -EINVAL;

        status = ql_mb_set_port_cfg(qdev);
        return status;
}

static u32 ql_get_rx_csum(struct net_device *netdev)
{
        struct ql_adapter *qdev = netdev_priv(netdev);
        return qdev->rx_csum;
}

static int ql_set_rx_csum(struct net_device *netdev, uint32_t data)
{
        struct ql_adapter *qdev = netdev_priv(netdev);
        qdev->rx_csum = data;
        return 0;
}

static int ql_set_tso(struct net_device *ndev, uint32_t data)
{
        if (data) {
                ndev->features |= NETIF_F_TSO;
                ndev->features |= NETIF_F_TSO6;
        } else {
                ndev->features &= ~NETIF_F_TSO;
                ndev->features &= ~NETIF_F_TSO6;
        }
        return 0;
}

static u32 ql_get_msglevel(struct net_device *ndev)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        return qdev->msg_enable;
}

static void ql_set_msglevel(struct net_device *ndev, u32 value)
{
        struct ql_adapter *qdev = netdev_priv(ndev);
        qdev->msg_enable = value;
}

const struct ethtool_ops qlge_ethtool_ops = {
        .get_settings = ql_get_settings,
        .get_drvinfo = ql_get_drvinfo,
        .get_wol = ql_get_wol,
        .set_wol = ql_set_wol,
        .get_regs_len = ql_get_regs_len,
        .get_regs = ql_get_regs,
        .get_msglevel = ql_get_msglevel,
        .set_msglevel = ql_set_msglevel,
        .get_link = ethtool_op_get_link,
        .phys_id = ql_phys_id,
        .self_test = ql_self_test,
        .get_pauseparam = ql_get_pauseparam,
        .set_pauseparam = ql_set_pauseparam,
        .get_rx_csum = ql_get_rx_csum,
        .set_rx_csum = ql_set_rx_csum,
        .get_tx_csum = ethtool_op_get_tx_csum,
        .set_tx_csum = ethtool_op_set_tx_csum,
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ql_set_tso,
        .get_coalesce = ql_get_coalesce,
        .set_coalesce = ql_set_coalesce,
        .get_sset_count = ql_get_sset_count,
        .get_strings = ql_get_strings,
        .get_ethtool_stats = ql_get_ethtool_stats,
};
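
/* The driver's probe path (qlge_main.c) is assumed to attach this table
 * with SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops); that is how the ethtool
 * core locates these handlers. The registration site is outside this file.
 */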