1 // SPDX-License-Identifier: GPL-2.0+
3 /* Copyright (C) 2015 Microchip Technology */
5 #include <linux/version.h>
6 #include <linux/module.h>
7 #include <linux/netdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/usb.h>
11 #include <linux/crc32.h>
12 #include <linux/signal.h>
13 #include <linux/slab.h>
14 #include <linux/if_vlan.h>
15 #include <linux/uaccess.h>
16 #include <linux/linkmode.h>
17 #include <linux/list.h>
19 #include <linux/ipv6.h>
20 #include <linux/mdio.h>
21 #include <linux/phy.h>
22 #include <net/ip6_checksum.h>
23 #include <net/vxlan.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/irq.h>
27 #include <linux/irqchip/chained_irq.h>
28 #include <linux/microchipphy.h>
29 #include <linux/phy_fixed.h>
30 #include <linux/of_mdio.h>
31 #include <linux/of_net.h>
34 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
35 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
36 #define DRIVER_NAME "lan78xx"
38 #define TX_TIMEOUT_JIFFIES (5 * HZ)
39 #define THROTTLE_JIFFIES (HZ / 8)
40 #define UNLINK_TIMEOUT_MS 3
42 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
44 #define SS_USB_PKT_SIZE (1024)
45 #define HS_USB_PKT_SIZE (512)
46 #define FS_USB_PKT_SIZE (64)
48 #define MAX_RX_FIFO_SIZE (12 * 1024)
49 #define MAX_TX_FIFO_SIZE (12 * 1024)
50 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
51 #define DEFAULT_BULK_IN_DELAY (0x0800)
52 #define MAX_SINGLE_PACKET_SIZE (9000)
53 #define DEFAULT_TX_CSUM_ENABLE (true)
54 #define DEFAULT_RX_CSUM_ENABLE (true)
55 #define DEFAULT_TSO_CSUM_ENABLE (true)
56 #define DEFAULT_VLAN_FILTER_ENABLE (true)
57 #define DEFAULT_VLAN_RX_OFFLOAD (true)
58 #define TX_OVERHEAD (8)
61 #define LAN78XX_USB_VENDOR_ID (0x0424)
62 #define LAN7800_USB_PRODUCT_ID (0x7800)
63 #define LAN7850_USB_PRODUCT_ID (0x7850)
64 #define LAN7801_USB_PRODUCT_ID (0x7801)
65 #define LAN78XX_EEPROM_MAGIC (0x78A5)
66 #define LAN78XX_OTP_MAGIC (0x78F3)
71 #define EEPROM_INDICATOR (0xA5)
72 #define EEPROM_MAC_OFFSET (0x01)
73 #define MAX_EEPROM_SIZE 512
74 #define OTP_INDICATOR_1 (0xF3)
75 #define OTP_INDICATOR_2 (0xF7)
77 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
78 WAKE_MCAST | WAKE_BCAST | \
79 WAKE_ARP | WAKE_MAGIC)
81 /* USB related defines */
82 #define BULK_IN_PIPE 1
83 #define BULK_OUT_PIPE 2
85 /* default autosuspend delay (mSec)*/
86 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
88 /* statistic update interval (mSec) */
89 #define STAT_UPDATE_TIMER (1 * 1000)
91 /* defines interrupts from interrupt EP */
92 #define MAX_INT_EP (32)
93 #define INT_EP_INTEP (31)
94 #define INT_EP_OTP_WR_DONE (28)
95 #define INT_EP_EEE_TX_LPI_START (26)
96 #define INT_EP_EEE_TX_LPI_STOP (25)
97 #define INT_EP_EEE_RX_LPI (24)
98 #define INT_EP_MAC_RESET_TIMEOUT (23)
99 #define INT_EP_RDFO (22)
100 #define INT_EP_TXE (21)
101 #define INT_EP_USB_STATUS (20)
102 #define INT_EP_TX_DIS (19)
103 #define INT_EP_RX_DIS (18)
104 #define INT_EP_PHY (17)
105 #define INT_EP_DP (16)
106 #define INT_EP_MAC_ERR (15)
107 #define INT_EP_TDFU (14)
108 #define INT_EP_TDFO (13)
109 #define INT_EP_UTX (12)
110 #define INT_EP_GPIO_11 (11)
111 #define INT_EP_GPIO_10 (10)
112 #define INT_EP_GPIO_9 (9)
113 #define INT_EP_GPIO_8 (8)
114 #define INT_EP_GPIO_7 (7)
115 #define INT_EP_GPIO_6 (6)
116 #define INT_EP_GPIO_5 (5)
117 #define INT_EP_GPIO_4 (4)
118 #define INT_EP_GPIO_3 (3)
119 #define INT_EP_GPIO_2 (2)
120 #define INT_EP_GPIO_1 (1)
121 #define INT_EP_GPIO_0 (0)
123 static const char lan78xx_gstrings
[][ETH_GSTRING_LEN
] = {
125 "RX Alignment Errors",
126 "Rx Fragment Errors",
128 "RX Undersize Frame Errors",
129 "RX Oversize Frame Errors",
131 "RX Unicast Byte Count",
132 "RX Broadcast Byte Count",
133 "RX Multicast Byte Count",
135 "RX Broadcast Frames",
136 "RX Multicast Frames",
139 "RX 65 - 127 Byte Frames",
140 "RX 128 - 255 Byte Frames",
141 "RX 256 - 511 Bytes Frames",
142 "RX 512 - 1023 Byte Frames",
143 "RX 1024 - 1518 Byte Frames",
144 "RX Greater 1518 Byte Frames",
145 "EEE RX LPI Transitions",
148 "TX Excess Deferral Errors",
151 "TX Single Collisions",
152 "TX Multiple Collisions",
153 "TX Excessive Collision",
154 "TX Late Collisions",
155 "TX Unicast Byte Count",
156 "TX Broadcast Byte Count",
157 "TX Multicast Byte Count",
159 "TX Broadcast Frames",
160 "TX Multicast Frames",
163 "TX 65 - 127 Byte Frames",
164 "TX 128 - 255 Byte Frames",
165 "TX 256 - 511 Bytes Frames",
166 "TX 512 - 1023 Byte Frames",
167 "TX 1024 - 1518 Byte Frames",
168 "TX Greater 1518 Byte Frames",
169 "EEE TX LPI Transitions",
173 struct lan78xx_statstage
{
175 u32 rx_alignment_errors
;
176 u32 rx_fragment_errors
;
177 u32 rx_jabber_errors
;
178 u32 rx_undersize_frame_errors
;
179 u32 rx_oversize_frame_errors
;
180 u32 rx_dropped_frames
;
181 u32 rx_unicast_byte_count
;
182 u32 rx_broadcast_byte_count
;
183 u32 rx_multicast_byte_count
;
184 u32 rx_unicast_frames
;
185 u32 rx_broadcast_frames
;
186 u32 rx_multicast_frames
;
188 u32 rx_64_byte_frames
;
189 u32 rx_65_127_byte_frames
;
190 u32 rx_128_255_byte_frames
;
191 u32 rx_256_511_bytes_frames
;
192 u32 rx_512_1023_byte_frames
;
193 u32 rx_1024_1518_byte_frames
;
194 u32 rx_greater_1518_byte_frames
;
195 u32 eee_rx_lpi_transitions
;
198 u32 tx_excess_deferral_errors
;
199 u32 tx_carrier_errors
;
200 u32 tx_bad_byte_count
;
201 u32 tx_single_collisions
;
202 u32 tx_multiple_collisions
;
203 u32 tx_excessive_collision
;
204 u32 tx_late_collisions
;
205 u32 tx_unicast_byte_count
;
206 u32 tx_broadcast_byte_count
;
207 u32 tx_multicast_byte_count
;
208 u32 tx_unicast_frames
;
209 u32 tx_broadcast_frames
;
210 u32 tx_multicast_frames
;
212 u32 tx_64_byte_frames
;
213 u32 tx_65_127_byte_frames
;
214 u32 tx_128_255_byte_frames
;
215 u32 tx_256_511_bytes_frames
;
216 u32 tx_512_1023_byte_frames
;
217 u32 tx_1024_1518_byte_frames
;
218 u32 tx_greater_1518_byte_frames
;
219 u32 eee_tx_lpi_transitions
;
223 struct lan78xx_statstage64
{
225 u64 rx_alignment_errors
;
226 u64 rx_fragment_errors
;
227 u64 rx_jabber_errors
;
228 u64 rx_undersize_frame_errors
;
229 u64 rx_oversize_frame_errors
;
230 u64 rx_dropped_frames
;
231 u64 rx_unicast_byte_count
;
232 u64 rx_broadcast_byte_count
;
233 u64 rx_multicast_byte_count
;
234 u64 rx_unicast_frames
;
235 u64 rx_broadcast_frames
;
236 u64 rx_multicast_frames
;
238 u64 rx_64_byte_frames
;
239 u64 rx_65_127_byte_frames
;
240 u64 rx_128_255_byte_frames
;
241 u64 rx_256_511_bytes_frames
;
242 u64 rx_512_1023_byte_frames
;
243 u64 rx_1024_1518_byte_frames
;
244 u64 rx_greater_1518_byte_frames
;
245 u64 eee_rx_lpi_transitions
;
248 u64 tx_excess_deferral_errors
;
249 u64 tx_carrier_errors
;
250 u64 tx_bad_byte_count
;
251 u64 tx_single_collisions
;
252 u64 tx_multiple_collisions
;
253 u64 tx_excessive_collision
;
254 u64 tx_late_collisions
;
255 u64 tx_unicast_byte_count
;
256 u64 tx_broadcast_byte_count
;
257 u64 tx_multicast_byte_count
;
258 u64 tx_unicast_frames
;
259 u64 tx_broadcast_frames
;
260 u64 tx_multicast_frames
;
262 u64 tx_64_byte_frames
;
263 u64 tx_65_127_byte_frames
;
264 u64 tx_128_255_byte_frames
;
265 u64 tx_256_511_bytes_frames
;
266 u64 tx_512_1023_byte_frames
;
267 u64 tx_1024_1518_byte_frames
;
268 u64 tx_greater_1518_byte_frames
;
269 u64 eee_tx_lpi_transitions
;
273 static u32 lan78xx_regs
[] = {
295 #define PHY_REG_SIZE (32 * sizeof(u32))
299 struct lan78xx_priv
{
300 struct lan78xx_net
*dev
;
302 u32 mchash_table
[DP_SEL_VHF_HASH_LEN
]; /* multicast hash table */
303 u32 pfilter_table
[NUM_OF_MAF
][2]; /* perfect filter table */
304 u32 vlan_table
[DP_SEL_VHF_VLAN_LEN
];
305 struct mutex dataport_mutex
; /* for dataport access */
306 spinlock_t rfe_ctl_lock
; /* for rfe register access */
307 struct work_struct set_multicast
;
308 struct work_struct set_vlan
;
322 struct skb_data
{ /* skb->cb is one of these */
324 struct lan78xx_net
*dev
;
325 enum skb_state state
;
331 struct usb_ctrlrequest req
;
332 struct lan78xx_net
*dev
;
335 #define EVENT_TX_HALT 0
336 #define EVENT_RX_HALT 1
337 #define EVENT_RX_MEMORY 2
338 #define EVENT_STS_SPLIT 3
339 #define EVENT_LINK_RESET 4
340 #define EVENT_RX_PAUSED 5
341 #define EVENT_DEV_WAKING 6
342 #define EVENT_DEV_ASLEEP 7
343 #define EVENT_DEV_OPEN 8
344 #define EVENT_STAT_UPDATE 9
347 struct mutex access_lock
; /* for stats access */
348 struct lan78xx_statstage saved
;
349 struct lan78xx_statstage rollover_count
;
350 struct lan78xx_statstage rollover_max
;
351 struct lan78xx_statstage64 curr_stat
;
354 struct irq_domain_data
{
355 struct irq_domain
*irqdomain
;
357 struct irq_chip
*irqchip
;
358 irq_flow_handler_t irq_handler
;
360 struct mutex irq_lock
; /* for irq bus access */
364 struct net_device
*net
;
365 struct usb_device
*udev
;
366 struct usb_interface
*intf
;
371 struct sk_buff_head rxq
;
372 struct sk_buff_head txq
;
373 struct sk_buff_head done
;
374 struct sk_buff_head rxq_pause
;
375 struct sk_buff_head txq_pend
;
377 struct tasklet_struct bh
;
378 struct delayed_work wq
;
380 struct usb_host_endpoint
*ep_blkin
;
381 struct usb_host_endpoint
*ep_blkout
;
382 struct usb_host_endpoint
*ep_intr
;
386 struct urb
*urb_intr
;
387 struct usb_anchor deferred
;
389 struct mutex phy_mutex
; /* for phy access */
390 unsigned pipe_in
, pipe_out
, pipe_intr
;
392 u32 hard_mtu
; /* count any extra framing */
393 size_t rx_urb_size
; /* size for rx urbs */
397 wait_queue_head_t
*wait
;
398 unsigned char suspend_count
;
401 struct timer_list delay
;
402 struct timer_list stat_monitor
;
404 unsigned long data
[5];
411 struct mii_bus
*mdiobus
;
412 phy_interface_t interface
;
415 u8 fc_request_control
;
418 struct statstage stats
;
420 struct irq_domain_data domain_data
;
423 /* define external phy id */
424 #define PHY_LAN8835 (0x0007C130)
425 #define PHY_KSZ9031RNX (0x00221620)
427 /* use ethtool to change the level for any given device */
428 static int msg_level
= -1;
429 module_param(msg_level
, int, 0);
430 MODULE_PARM_DESC(msg_level
, "Override default message level");
432 static int lan78xx_read_reg(struct lan78xx_net
*dev
, u32 index
, u32
*data
)
434 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
440 ret
= usb_control_msg(dev
->udev
, usb_rcvctrlpipe(dev
->udev
, 0),
441 USB_VENDOR_REQUEST_READ_REGISTER
,
442 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
443 0, index
, buf
, 4, USB_CTRL_GET_TIMEOUT
);
444 if (likely(ret
>= 0)) {
448 netdev_warn(dev
->net
,
449 "Failed to read register index 0x%08x. ret = %d",
458 static int lan78xx_write_reg(struct lan78xx_net
*dev
, u32 index
, u32 data
)
460 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
469 ret
= usb_control_msg(dev
->udev
, usb_sndctrlpipe(dev
->udev
, 0),
470 USB_VENDOR_REQUEST_WRITE_REGISTER
,
471 USB_DIR_OUT
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
472 0, index
, buf
, 4, USB_CTRL_SET_TIMEOUT
);
473 if (unlikely(ret
< 0)) {
474 netdev_warn(dev
->net
,
475 "Failed to write register index 0x%08x. ret = %d",
484 static int lan78xx_read_stats(struct lan78xx_net
*dev
,
485 struct lan78xx_statstage
*data
)
489 struct lan78xx_statstage
*stats
;
493 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
497 ret
= usb_control_msg(dev
->udev
,
498 usb_rcvctrlpipe(dev
->udev
, 0),
499 USB_VENDOR_REQUEST_GET_STATS
,
500 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
505 USB_CTRL_SET_TIMEOUT
);
506 if (likely(ret
>= 0)) {
509 for (i
= 0; i
< sizeof(*stats
)/sizeof(u32
); i
++) {
510 le32_to_cpus(&src
[i
]);
514 netdev_warn(dev
->net
,
515 "Failed to read stat ret = %d", ret
);
523 #define check_counter_rollover(struct1, dev_stats, member) { \
524 if (struct1->member < dev_stats.saved.member) \
525 dev_stats.rollover_count.member++; \
528 static void lan78xx_check_stat_rollover(struct lan78xx_net
*dev
,
529 struct lan78xx_statstage
*stats
)
531 check_counter_rollover(stats
, dev
->stats
, rx_fcs_errors
);
532 check_counter_rollover(stats
, dev
->stats
, rx_alignment_errors
);
533 check_counter_rollover(stats
, dev
->stats
, rx_fragment_errors
);
534 check_counter_rollover(stats
, dev
->stats
, rx_jabber_errors
);
535 check_counter_rollover(stats
, dev
->stats
, rx_undersize_frame_errors
);
536 check_counter_rollover(stats
, dev
->stats
, rx_oversize_frame_errors
);
537 check_counter_rollover(stats
, dev
->stats
, rx_dropped_frames
);
538 check_counter_rollover(stats
, dev
->stats
, rx_unicast_byte_count
);
539 check_counter_rollover(stats
, dev
->stats
, rx_broadcast_byte_count
);
540 check_counter_rollover(stats
, dev
->stats
, rx_multicast_byte_count
);
541 check_counter_rollover(stats
, dev
->stats
, rx_unicast_frames
);
542 check_counter_rollover(stats
, dev
->stats
, rx_broadcast_frames
);
543 check_counter_rollover(stats
, dev
->stats
, rx_multicast_frames
);
544 check_counter_rollover(stats
, dev
->stats
, rx_pause_frames
);
545 check_counter_rollover(stats
, dev
->stats
, rx_64_byte_frames
);
546 check_counter_rollover(stats
, dev
->stats
, rx_65_127_byte_frames
);
547 check_counter_rollover(stats
, dev
->stats
, rx_128_255_byte_frames
);
548 check_counter_rollover(stats
, dev
->stats
, rx_256_511_bytes_frames
);
549 check_counter_rollover(stats
, dev
->stats
, rx_512_1023_byte_frames
);
550 check_counter_rollover(stats
, dev
->stats
, rx_1024_1518_byte_frames
);
551 check_counter_rollover(stats
, dev
->stats
, rx_greater_1518_byte_frames
);
552 check_counter_rollover(stats
, dev
->stats
, eee_rx_lpi_transitions
);
553 check_counter_rollover(stats
, dev
->stats
, eee_rx_lpi_time
);
554 check_counter_rollover(stats
, dev
->stats
, tx_fcs_errors
);
555 check_counter_rollover(stats
, dev
->stats
, tx_excess_deferral_errors
);
556 check_counter_rollover(stats
, dev
->stats
, tx_carrier_errors
);
557 check_counter_rollover(stats
, dev
->stats
, tx_bad_byte_count
);
558 check_counter_rollover(stats
, dev
->stats
, tx_single_collisions
);
559 check_counter_rollover(stats
, dev
->stats
, tx_multiple_collisions
);
560 check_counter_rollover(stats
, dev
->stats
, tx_excessive_collision
);
561 check_counter_rollover(stats
, dev
->stats
, tx_late_collisions
);
562 check_counter_rollover(stats
, dev
->stats
, tx_unicast_byte_count
);
563 check_counter_rollover(stats
, dev
->stats
, tx_broadcast_byte_count
);
564 check_counter_rollover(stats
, dev
->stats
, tx_multicast_byte_count
);
565 check_counter_rollover(stats
, dev
->stats
, tx_unicast_frames
);
566 check_counter_rollover(stats
, dev
->stats
, tx_broadcast_frames
);
567 check_counter_rollover(stats
, dev
->stats
, tx_multicast_frames
);
568 check_counter_rollover(stats
, dev
->stats
, tx_pause_frames
);
569 check_counter_rollover(stats
, dev
->stats
, tx_64_byte_frames
);
570 check_counter_rollover(stats
, dev
->stats
, tx_65_127_byte_frames
);
571 check_counter_rollover(stats
, dev
->stats
, tx_128_255_byte_frames
);
572 check_counter_rollover(stats
, dev
->stats
, tx_256_511_bytes_frames
);
573 check_counter_rollover(stats
, dev
->stats
, tx_512_1023_byte_frames
);
574 check_counter_rollover(stats
, dev
->stats
, tx_1024_1518_byte_frames
);
575 check_counter_rollover(stats
, dev
->stats
, tx_greater_1518_byte_frames
);
576 check_counter_rollover(stats
, dev
->stats
, eee_tx_lpi_transitions
);
577 check_counter_rollover(stats
, dev
->stats
, eee_tx_lpi_time
);
579 memcpy(&dev
->stats
.saved
, stats
, sizeof(struct lan78xx_statstage
));
582 static void lan78xx_update_stats(struct lan78xx_net
*dev
)
584 u32
*p
, *count
, *max
;
587 struct lan78xx_statstage lan78xx_stats
;
589 if (usb_autopm_get_interface(dev
->intf
) < 0)
592 p
= (u32
*)&lan78xx_stats
;
593 count
= (u32
*)&dev
->stats
.rollover_count
;
594 max
= (u32
*)&dev
->stats
.rollover_max
;
595 data
= (u64
*)&dev
->stats
.curr_stat
;
597 mutex_lock(&dev
->stats
.access_lock
);
599 if (lan78xx_read_stats(dev
, &lan78xx_stats
) > 0)
600 lan78xx_check_stat_rollover(dev
, &lan78xx_stats
);
602 for (i
= 0; i
< (sizeof(lan78xx_stats
) / (sizeof(u32
))); i
++)
603 data
[i
] = (u64
)p
[i
] + ((u64
)count
[i
] * ((u64
)max
[i
] + 1));
605 mutex_unlock(&dev
->stats
.access_lock
);
607 usb_autopm_put_interface(dev
->intf
);
610 /* Loop until the read is completed with timeout called with phy_mutex held */
611 static int lan78xx_phy_wait_not_busy(struct lan78xx_net
*dev
)
613 unsigned long start_time
= jiffies
;
618 ret
= lan78xx_read_reg(dev
, MII_ACC
, &val
);
619 if (unlikely(ret
< 0))
622 if (!(val
& MII_ACC_MII_BUSY_
))
624 } while (!time_after(jiffies
, start_time
+ HZ
));
629 static inline u32
mii_access(int id
, int index
, int read
)
633 ret
= ((u32
)id
<< MII_ACC_PHY_ADDR_SHIFT_
) & MII_ACC_PHY_ADDR_MASK_
;
634 ret
|= ((u32
)index
<< MII_ACC_MIIRINDA_SHIFT_
) & MII_ACC_MIIRINDA_MASK_
;
636 ret
|= MII_ACC_MII_READ_
;
638 ret
|= MII_ACC_MII_WRITE_
;
639 ret
|= MII_ACC_MII_BUSY_
;
644 static int lan78xx_wait_eeprom(struct lan78xx_net
*dev
)
646 unsigned long start_time
= jiffies
;
651 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
652 if (unlikely(ret
< 0))
655 if (!(val
& E2P_CMD_EPC_BUSY_
) ||
656 (val
& E2P_CMD_EPC_TIMEOUT_
))
658 usleep_range(40, 100);
659 } while (!time_after(jiffies
, start_time
+ HZ
));
661 if (val
& (E2P_CMD_EPC_TIMEOUT_
| E2P_CMD_EPC_BUSY_
)) {
662 netdev_warn(dev
->net
, "EEPROM read operation timeout");
669 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net
*dev
)
671 unsigned long start_time
= jiffies
;
676 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
677 if (unlikely(ret
< 0))
680 if (!(val
& E2P_CMD_EPC_BUSY_
))
683 usleep_range(40, 100);
684 } while (!time_after(jiffies
, start_time
+ HZ
));
686 netdev_warn(dev
->net
, "EEPROM is busy");
690 static int lan78xx_read_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
691 u32 length
, u8
*data
)
698 /* depends on chip, some EEPROM pins are muxed with LED function.
699  * disable & restore LED function to access EEPROM.
700  */
701 ret
= lan78xx_read_reg(dev
, HW_CFG
, &val
);
703 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
) {
704 val
&= ~(HW_CFG_LED1_EN_
| HW_CFG_LED0_EN_
);
705 ret
= lan78xx_write_reg(dev
, HW_CFG
, val
);
708 retval
= lan78xx_eeprom_confirm_not_busy(dev
);
712 for (i
= 0; i
< length
; i
++) {
713 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_READ_
;
714 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
715 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
716 if (unlikely(ret
< 0)) {
721 retval
= lan78xx_wait_eeprom(dev
);
725 ret
= lan78xx_read_reg(dev
, E2P_DATA
, &val
);
726 if (unlikely(ret
< 0)) {
731 data
[i
] = val
& 0xFF;
737 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
)
738 ret
= lan78xx_write_reg(dev
, HW_CFG
, saved
);
743 static int lan78xx_read_eeprom(struct lan78xx_net
*dev
, u32 offset
,
744 u32 length
, u8
*data
)
749 ret
= lan78xx_read_raw_eeprom(dev
, 0, 1, &sig
);
750 if ((ret
== 0) && (sig
== EEPROM_INDICATOR
))
751 ret
= lan78xx_read_raw_eeprom(dev
, offset
, length
, data
);
758 static int lan78xx_write_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
759 u32 length
, u8
*data
)
766 /* depends on chip, some EEPROM pins are muxed with LED function.
767  * disable & restore LED function to access EEPROM.
768  */
769 ret
= lan78xx_read_reg(dev
, HW_CFG
, &val
);
771 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
) {
772 val
&= ~(HW_CFG_LED1_EN_
| HW_CFG_LED0_EN_
);
773 ret
= lan78xx_write_reg(dev
, HW_CFG
, val
);
776 retval
= lan78xx_eeprom_confirm_not_busy(dev
);
780 /* Issue write/erase enable command */
781 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_EWEN_
;
782 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
783 if (unlikely(ret
< 0)) {
788 retval
= lan78xx_wait_eeprom(dev
);
792 for (i
= 0; i
< length
; i
++) {
793 /* Fill data register */
795 ret
= lan78xx_write_reg(dev
, E2P_DATA
, val
);
801 /* Send "write" command */
802 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_WRITE_
;
803 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
804 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
810 retval
= lan78xx_wait_eeprom(dev
);
819 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
)
820 ret
= lan78xx_write_reg(dev
, HW_CFG
, saved
);
825 static int lan78xx_read_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
826 u32 length
, u8
*data
)
831 unsigned long timeout
;
833 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
835 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
836 /* clear it and wait to be cleared */
837 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
839 timeout
= jiffies
+ HZ
;
842 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
843 if (time_after(jiffies
, timeout
)) {
844 netdev_warn(dev
->net
,
845 "timeout on OTP_PWR_DN");
848 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
851 for (i
= 0; i
< length
; i
++) {
852 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
853 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
854 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
855 ((offset
+ i
) & OTP_ADDR2_10_3
));
857 ret
= lan78xx_write_reg(dev
, OTP_FUNC_CMD
, OTP_FUNC_CMD_READ_
);
858 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
860 timeout
= jiffies
+ HZ
;
863 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
864 if (time_after(jiffies
, timeout
)) {
865 netdev_warn(dev
->net
,
866 "timeout on OTP_STATUS");
869 } while (buf
& OTP_STATUS_BUSY_
);
871 ret
= lan78xx_read_reg(dev
, OTP_RD_DATA
, &buf
);
873 data
[i
] = (u8
)(buf
& 0xFF);
879 static int lan78xx_write_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
880 u32 length
, u8
*data
)
885 unsigned long timeout
;
887 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
889 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
890 /* clear it and wait to be cleared */
891 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
893 timeout
= jiffies
+ HZ
;
896 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
897 if (time_after(jiffies
, timeout
)) {
898 netdev_warn(dev
->net
,
899 "timeout on OTP_PWR_DN completion");
902 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
905 /* set to BYTE program mode */
906 ret
= lan78xx_write_reg(dev
, OTP_PRGM_MODE
, OTP_PRGM_MODE_BYTE_
);
908 for (i
= 0; i
< length
; i
++) {
909 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
910 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
911 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
912 ((offset
+ i
) & OTP_ADDR2_10_3
));
913 ret
= lan78xx_write_reg(dev
, OTP_PRGM_DATA
, data
[i
]);
914 ret
= lan78xx_write_reg(dev
, OTP_TST_CMD
, OTP_TST_CMD_PRGVRFY_
);
915 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
917 timeout
= jiffies
+ HZ
;
920 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
921 if (time_after(jiffies
, timeout
)) {
922 netdev_warn(dev
->net
,
923 "Timeout on OTP_STATUS completion");
926 } while (buf
& OTP_STATUS_BUSY_
);
932 static int lan78xx_read_otp(struct lan78xx_net
*dev
, u32 offset
,
933 u32 length
, u8
*data
)
938 ret
= lan78xx_read_raw_otp(dev
, 0, 1, &sig
);
941 if (sig
== OTP_INDICATOR_2
)
943 else if (sig
!= OTP_INDICATOR_1
)
946 ret
= lan78xx_read_raw_otp(dev
, offset
, length
, data
);
952 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net
*dev
)
956 for (i
= 0; i
< 100; i
++) {
959 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
960 if (unlikely(ret
< 0))
963 if (dp_sel
& DP_SEL_DPRDY_
)
966 usleep_range(40, 100);
969 netdev_warn(dev
->net
, "lan78xx_dataport_wait_not_busy timed out");
974 static int lan78xx_dataport_write(struct lan78xx_net
*dev
, u32 ram_select
,
975 u32 addr
, u32 length
, u32
*buf
)
977 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
981 if (usb_autopm_get_interface(dev
->intf
) < 0)
984 mutex_lock(&pdata
->dataport_mutex
);
986 ret
= lan78xx_dataport_wait_not_busy(dev
);
990 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
992 dp_sel
&= ~DP_SEL_RSEL_MASK_
;
993 dp_sel
|= ram_select
;
994 ret
= lan78xx_write_reg(dev
, DP_SEL
, dp_sel
);
996 for (i
= 0; i
< length
; i
++) {
997 ret
= lan78xx_write_reg(dev
, DP_ADDR
, addr
+ i
);
999 ret
= lan78xx_write_reg(dev
, DP_DATA
, buf
[i
]);
1001 ret
= lan78xx_write_reg(dev
, DP_CMD
, DP_CMD_WRITE_
);
1003 ret
= lan78xx_dataport_wait_not_busy(dev
);
1009 mutex_unlock(&pdata
->dataport_mutex
);
1010 usb_autopm_put_interface(dev
->intf
);
1015 static void lan78xx_set_addr_filter(struct lan78xx_priv
*pdata
,
1016 int index
, u8 addr
[ETH_ALEN
])
1020 if ((pdata
) && (index
> 0) && (index
< NUM_OF_MAF
)) {
1022 temp
= addr
[2] | (temp
<< 8);
1023 temp
= addr
[1] | (temp
<< 8);
1024 temp
= addr
[0] | (temp
<< 8);
1025 pdata
->pfilter_table
[index
][1] = temp
;
1027 temp
= addr
[4] | (temp
<< 8);
1028 temp
|= MAF_HI_VALID_
| MAF_HI_TYPE_DST_
;
1029 pdata
->pfilter_table
[index
][0] = temp
;
1033 /* returns hash bit number for given MAC address */
1034 static inline u32
lan78xx_hash(char addr
[ETH_ALEN
])
1036 return (ether_crc(ETH_ALEN
, addr
) >> 23) & 0x1ff;
1039 static void lan78xx_deferred_multicast_write(struct work_struct
*param
)
1041 struct lan78xx_priv
*pdata
=
1042 container_of(param
, struct lan78xx_priv
, set_multicast
);
1043 struct lan78xx_net
*dev
= pdata
->dev
;
1047 netif_dbg(dev
, drv
, dev
->net
, "deferred multicast write 0x%08x\n",
1050 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, DP_SEL_VHF_VLAN_LEN
,
1051 DP_SEL_VHF_HASH_LEN
, pdata
->mchash_table
);
1053 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
1054 ret
= lan78xx_write_reg(dev
, MAF_HI(i
), 0);
1055 ret
= lan78xx_write_reg(dev
, MAF_LO(i
),
1056 pdata
->pfilter_table
[i
][1]);
1057 ret
= lan78xx_write_reg(dev
, MAF_HI(i
),
1058 pdata
->pfilter_table
[i
][0]);
1061 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
1064 static void lan78xx_set_multicast(struct net_device
*netdev
)
1066 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1067 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1068 unsigned long flags
;
1071 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
1073 pdata
->rfe_ctl
&= ~(RFE_CTL_UCAST_EN_
| RFE_CTL_MCAST_EN_
|
1074 RFE_CTL_DA_PERFECT_
| RFE_CTL_MCAST_HASH_
);
1076 for (i
= 0; i
< DP_SEL_VHF_HASH_LEN
; i
++)
1077 pdata
->mchash_table
[i
] = 0;
1078 /* pfilter_table[0] has own HW address */
1079 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
1080 pdata
->pfilter_table
[i
][0] =
1081 pdata
->pfilter_table
[i
][1] = 0;
1084 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
;
1086 if (dev
->net
->flags
& IFF_PROMISC
) {
1087 netif_dbg(dev
, drv
, dev
->net
, "promiscuous mode enabled");
1088 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
| RFE_CTL_UCAST_EN_
;
1090 if (dev
->net
->flags
& IFF_ALLMULTI
) {
1091 netif_dbg(dev
, drv
, dev
->net
,
1092 "receive all multicast enabled");
1093 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
;
1097 if (netdev_mc_count(dev
->net
)) {
1098 struct netdev_hw_addr
*ha
;
1101 netif_dbg(dev
, drv
, dev
->net
, "receive multicast hash filter");
1103 pdata
->rfe_ctl
|= RFE_CTL_DA_PERFECT_
;
1106 netdev_for_each_mc_addr(ha
, netdev
) {
1107 /* set first 32 into Perfect Filter */
1109 lan78xx_set_addr_filter(pdata
, i
, ha
->addr
);
1111 u32 bitnum
= lan78xx_hash(ha
->addr
);
1113 pdata
->mchash_table
[bitnum
/ 32] |=
1114 (1 << (bitnum
% 32));
1115 pdata
->rfe_ctl
|= RFE_CTL_MCAST_HASH_
;
1121 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
1123 /* defer register writes to a sleepable context */
1124 schedule_work(&pdata
->set_multicast
);
1127 static int lan78xx_update_flowcontrol(struct lan78xx_net
*dev
, u8 duplex
,
1128 u16 lcladv
, u16 rmtadv
)
1130 u32 flow
= 0, fct_flow
= 0;
1134 if (dev
->fc_autoneg
)
1135 cap
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1137 cap
= dev
->fc_request_control
;
1139 if (cap
& FLOW_CTRL_TX
)
1140 flow
|= (FLOW_CR_TX_FCEN_
| 0xFFFF);
1142 if (cap
& FLOW_CTRL_RX
)
1143 flow
|= FLOW_CR_RX_FCEN_
;
1145 if (dev
->udev
->speed
== USB_SPEED_SUPER
)
1147 else if (dev
->udev
->speed
== USB_SPEED_HIGH
)
1150 netif_dbg(dev
, link
, dev
->net
, "rx pause %s, tx pause %s",
1151 (cap
& FLOW_CTRL_RX
? "enabled" : "disabled"),
1152 (cap
& FLOW_CTRL_TX
? "enabled" : "disabled"));
1154 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, fct_flow
);
1156 /* threshold value should be set before enabling flow */
1157 ret
= lan78xx_write_reg(dev
, FLOW
, flow
);
1162 static int lan78xx_link_reset(struct lan78xx_net
*dev
)
1164 struct phy_device
*phydev
= dev
->net
->phydev
;
1165 struct ethtool_link_ksettings ecmd
;
1166 int ladv
, radv
, ret
;
1169 /* clear LAN78xx interrupt status */
1170 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_PHY_INT_
);
1171 if (unlikely(ret
< 0))
1174 phy_read_status(phydev
);
1176 if (!phydev
->link
&& dev
->link_on
) {
1177 dev
->link_on
= false;
1180 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1181 if (unlikely(ret
< 0))
1184 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1185 if (unlikely(ret
< 0))
1188 del_timer(&dev
->stat_monitor
);
1189 } else if (phydev
->link
&& !dev
->link_on
) {
1190 dev
->link_on
= true;
1192 phy_ethtool_ksettings_get(phydev
, &ecmd
);
1194 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
1195 if (ecmd
.base
.speed
== 1000) {
1197 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1198 buf
&= ~USB_CFG1_DEV_U2_INIT_EN_
;
1199 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
1201 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1202 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
1203 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
1205 /* enable U1 & U2 */
1206 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1207 buf
|= USB_CFG1_DEV_U2_INIT_EN_
;
1208 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
1209 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
1213 ladv
= phy_read(phydev
, MII_ADVERTISE
);
1217 radv
= phy_read(phydev
, MII_LPA
);
1221 netif_dbg(dev
, link
, dev
->net
,
1222 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1223 ecmd
.base
.speed
, ecmd
.base
.duplex
, ladv
, radv
);
1225 ret
= lan78xx_update_flowcontrol(dev
, ecmd
.base
.duplex
, ladv
,
1228 if (!timer_pending(&dev
->stat_monitor
)) {
1230 mod_timer(&dev
->stat_monitor
,
1231 jiffies
+ STAT_UPDATE_TIMER
);
1234 tasklet_schedule(&dev
->bh
);
1240 /* some work can't be done in tasklets, so we use keventd
1242  * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1243  * but tasklet_schedule() doesn't. hope the failure is rare.
1244  */
1245 static void lan78xx_defer_kevent(struct lan78xx_net
*dev
, int work
)
1247 set_bit(work
, &dev
->flags
);
1248 if (!schedule_delayed_work(&dev
->wq
, 0))
1249 netdev_err(dev
->net
, "kevent %d may have been dropped\n", work
);
1252 static void lan78xx_status(struct lan78xx_net
*dev
, struct urb
*urb
)
1256 if (urb
->actual_length
!= 4) {
1257 netdev_warn(dev
->net
,
1258 "unexpected urb length %d", urb
->actual_length
);
1262 intdata
= get_unaligned_le32(urb
->transfer_buffer
);
1264 if (intdata
& INT_ENP_PHY_INT
) {
1265 netif_dbg(dev
, link
, dev
->net
, "PHY INTR: 0x%08x\n", intdata
);
1266 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
1268 if (dev
->domain_data
.phyirq
> 0) {
1269 local_irq_disable();
1270 generic_handle_irq(dev
->domain_data
.phyirq
);
1274 netdev_warn(dev
->net
,
1275 "unexpected interrupt: 0x%08x\n", intdata
);
1278 static int lan78xx_ethtool_get_eeprom_len(struct net_device
*netdev
)
1280 return MAX_EEPROM_SIZE
;
1283 static int lan78xx_ethtool_get_eeprom(struct net_device
*netdev
,
1284 struct ethtool_eeprom
*ee
, u8
*data
)
1286 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1289 ret
= usb_autopm_get_interface(dev
->intf
);
1293 ee
->magic
= LAN78XX_EEPROM_MAGIC
;
1295 ret
= lan78xx_read_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1297 usb_autopm_put_interface(dev
->intf
);
1302 static int lan78xx_ethtool_set_eeprom(struct net_device
*netdev
,
1303 struct ethtool_eeprom
*ee
, u8
*data
)
1305 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1308 ret
= usb_autopm_get_interface(dev
->intf
);
1312 /* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1313  * to load data from EEPROM
1314  */
1315 if (ee
->magic
== LAN78XX_EEPROM_MAGIC
)
1316 ret
= lan78xx_write_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1317 else if ((ee
->magic
== LAN78XX_OTP_MAGIC
) &&
1318 (ee
->offset
== 0) &&
1320 (data
[0] == OTP_INDICATOR_1
))
1321 ret
= lan78xx_write_raw_otp(dev
, ee
->offset
, ee
->len
, data
);
1323 usb_autopm_put_interface(dev
->intf
);
1328 static void lan78xx_get_strings(struct net_device
*netdev
, u32 stringset
,
1331 if (stringset
== ETH_SS_STATS
)
1332 memcpy(data
, lan78xx_gstrings
, sizeof(lan78xx_gstrings
));
1335 static int lan78xx_get_sset_count(struct net_device
*netdev
, int sset
)
1337 if (sset
== ETH_SS_STATS
)
1338 return ARRAY_SIZE(lan78xx_gstrings
);
1343 static void lan78xx_get_stats(struct net_device
*netdev
,
1344 struct ethtool_stats
*stats
, u64
*data
)
1346 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1348 lan78xx_update_stats(dev
);
1350 mutex_lock(&dev
->stats
.access_lock
);
1351 memcpy(data
, &dev
->stats
.curr_stat
, sizeof(dev
->stats
.curr_stat
));
1352 mutex_unlock(&dev
->stats
.access_lock
);
1355 static void lan78xx_get_wol(struct net_device
*netdev
,
1356 struct ethtool_wolinfo
*wol
)
1358 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1361 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1363 if (usb_autopm_get_interface(dev
->intf
) < 0)
1366 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1367 if (unlikely(ret
< 0)) {
1371 if (buf
& USB_CFG_RMT_WKP_
) {
1372 wol
->supported
= WAKE_ALL
;
1373 wol
->wolopts
= pdata
->wol
;
1380 usb_autopm_put_interface(dev
->intf
);
1383 static int lan78xx_set_wol(struct net_device
*netdev
,
1384 struct ethtool_wolinfo
*wol
)
1386 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1387 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1390 ret
= usb_autopm_get_interface(dev
->intf
);
1394 if (wol
->wolopts
& ~WAKE_ALL
)
1397 pdata
->wol
= wol
->wolopts
;
1399 device_set_wakeup_enable(&dev
->udev
->dev
, (bool)wol
->wolopts
);
1401 phy_ethtool_set_wol(netdev
->phydev
, wol
);
1403 usb_autopm_put_interface(dev
->intf
);
1408 static int lan78xx_get_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1410 struct lan78xx_net
*dev
= netdev_priv(net
);
1411 struct phy_device
*phydev
= net
->phydev
;
1415 ret
= usb_autopm_get_interface(dev
->intf
);
1419 ret
= phy_ethtool_get_eee(phydev
, edata
);
1423 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1424 if (buf
& MAC_CR_EEE_EN_
) {
1425 edata
->eee_enabled
= true;
1426 edata
->eee_active
= !!(edata
->advertised
&
1427 edata
->lp_advertised
);
1428 edata
->tx_lpi_enabled
= true;
1429 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1430 ret
= lan78xx_read_reg(dev
, EEE_TX_LPI_REQ_DLY
, &buf
);
1431 edata
->tx_lpi_timer
= buf
;
1433 edata
->eee_enabled
= false;
1434 edata
->eee_active
= false;
1435 edata
->tx_lpi_enabled
= false;
1436 edata
->tx_lpi_timer
= 0;
1441 usb_autopm_put_interface(dev
->intf
);
1446 static int lan78xx_set_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1448 struct lan78xx_net
*dev
= netdev_priv(net
);
1452 ret
= usb_autopm_get_interface(dev
->intf
);
1456 if (edata
->eee_enabled
) {
1457 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1458 buf
|= MAC_CR_EEE_EN_
;
1459 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1461 phy_ethtool_set_eee(net
->phydev
, edata
);
1463 buf
= (u32
)edata
->tx_lpi_timer
;
1464 ret
= lan78xx_write_reg(dev
, EEE_TX_LPI_REQ_DLY
, buf
);
1466 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1467 buf
&= ~MAC_CR_EEE_EN_
;
1468 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1471 usb_autopm_put_interface(dev
->intf
);
1476 static u32
lan78xx_get_link(struct net_device
*net
)
1478 phy_read_status(net
->phydev
);
1480 return net
->phydev
->link
;
1483 static void lan78xx_get_drvinfo(struct net_device
*net
,
1484 struct ethtool_drvinfo
*info
)
1486 struct lan78xx_net
*dev
= netdev_priv(net
);
1488 strncpy(info
->driver
, DRIVER_NAME
, sizeof(info
->driver
));
1489 usb_make_path(dev
->udev
, info
->bus_info
, sizeof(info
->bus_info
));
1492 static u32
lan78xx_get_msglevel(struct net_device
*net
)
1494 struct lan78xx_net
*dev
= netdev_priv(net
);
1496 return dev
->msg_enable
;
1499 static void lan78xx_set_msglevel(struct net_device
*net
, u32 level
)
1501 struct lan78xx_net
*dev
= netdev_priv(net
);
1503 dev
->msg_enable
= level
;
1506 static int lan78xx_get_link_ksettings(struct net_device
*net
,
1507 struct ethtool_link_ksettings
*cmd
)
1509 struct lan78xx_net
*dev
= netdev_priv(net
);
1510 struct phy_device
*phydev
= net
->phydev
;
1513 ret
= usb_autopm_get_interface(dev
->intf
);
1517 phy_ethtool_ksettings_get(phydev
, cmd
);
1519 usb_autopm_put_interface(dev
->intf
);
1524 static int lan78xx_set_link_ksettings(struct net_device
*net
,
1525 const struct ethtool_link_ksettings
*cmd
)
1527 struct lan78xx_net
*dev
= netdev_priv(net
);
1528 struct phy_device
*phydev
= net
->phydev
;
1532 ret
= usb_autopm_get_interface(dev
->intf
);
1536 /* change speed & duplex */
1537 ret
= phy_ethtool_ksettings_set(phydev
, cmd
);
1539 if (!cmd
->base
.autoneg
) {
1540 /* force link down */
1541 temp
= phy_read(phydev
, MII_BMCR
);
1542 phy_write(phydev
, MII_BMCR
, temp
| BMCR_LOOPBACK
);
1544 phy_write(phydev
, MII_BMCR
, temp
);
1547 usb_autopm_put_interface(dev
->intf
);
1552 static void lan78xx_get_pause(struct net_device
*net
,
1553 struct ethtool_pauseparam
*pause
)
1555 struct lan78xx_net
*dev
= netdev_priv(net
);
1556 struct phy_device
*phydev
= net
->phydev
;
1557 struct ethtool_link_ksettings ecmd
;
1559 phy_ethtool_ksettings_get(phydev
, &ecmd
);
1561 pause
->autoneg
= dev
->fc_autoneg
;
1563 if (dev
->fc_request_control
& FLOW_CTRL_TX
)
1564 pause
->tx_pause
= 1;
1566 if (dev
->fc_request_control
& FLOW_CTRL_RX
)
1567 pause
->rx_pause
= 1;
1570 static int lan78xx_set_pause(struct net_device
*net
,
1571 struct ethtool_pauseparam
*pause
)
1573 struct lan78xx_net
*dev
= netdev_priv(net
);
1574 struct phy_device
*phydev
= net
->phydev
;
1575 struct ethtool_link_ksettings ecmd
;
1578 phy_ethtool_ksettings_get(phydev
, &ecmd
);
1580 if (pause
->autoneg
&& !ecmd
.base
.autoneg
) {
1585 dev
->fc_request_control
= 0;
1586 if (pause
->rx_pause
)
1587 dev
->fc_request_control
|= FLOW_CTRL_RX
;
1589 if (pause
->tx_pause
)
1590 dev
->fc_request_control
|= FLOW_CTRL_TX
;
1592 if (ecmd
.base
.autoneg
) {
1593 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc
) = { 0, };
1596 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT
,
1597 ecmd
.link_modes
.advertising
);
1598 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT
,
1599 ecmd
.link_modes
.advertising
);
1600 mii_adv
= (u32
)mii_advertise_flowctrl(dev
->fc_request_control
);
1601 mii_adv_to_linkmode_adv_t(fc
, mii_adv
);
1602 linkmode_or(ecmd
.link_modes
.advertising
, fc
,
1603 ecmd
.link_modes
.advertising
);
1605 phy_ethtool_ksettings_set(phydev
, &ecmd
);
1608 dev
->fc_autoneg
= pause
->autoneg
;
1615 static int lan78xx_get_regs_len(struct net_device
*netdev
)
1617 if (!netdev
->phydev
)
1618 return (sizeof(lan78xx_regs
));
1620 return (sizeof(lan78xx_regs
) + PHY_REG_SIZE
);
1624 lan78xx_get_regs(struct net_device
*netdev
, struct ethtool_regs
*regs
,
1629 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1631 /* Read Device/MAC registers */
1632 for (i
= 0; i
< ARRAY_SIZE(lan78xx_regs
); i
++)
1633 lan78xx_read_reg(dev
, lan78xx_regs
[i
], &data
[i
]);
1635 if (!netdev
->phydev
)
1638 /* Read PHY registers */
1639 for (j
= 0; j
< 32; i
++, j
++)
1640 data
[i
] = phy_read(netdev
->phydev
, j
);
1643 static const struct ethtool_ops lan78xx_ethtool_ops
= {
1644 .get_link
= lan78xx_get_link
,
1645 .nway_reset
= phy_ethtool_nway_reset
,
1646 .get_drvinfo
= lan78xx_get_drvinfo
,
1647 .get_msglevel
= lan78xx_get_msglevel
,
1648 .set_msglevel
= lan78xx_set_msglevel
,
1649 .get_eeprom_len
= lan78xx_ethtool_get_eeprom_len
,
1650 .get_eeprom
= lan78xx_ethtool_get_eeprom
,
1651 .set_eeprom
= lan78xx_ethtool_set_eeprom
,
1652 .get_ethtool_stats
= lan78xx_get_stats
,
1653 .get_sset_count
= lan78xx_get_sset_count
,
1654 .get_strings
= lan78xx_get_strings
,
1655 .get_wol
= lan78xx_get_wol
,
1656 .set_wol
= lan78xx_set_wol
,
1657 .get_eee
= lan78xx_get_eee
,
1658 .set_eee
= lan78xx_set_eee
,
1659 .get_pauseparam
= lan78xx_get_pause
,
1660 .set_pauseparam
= lan78xx_set_pause
,
1661 .get_link_ksettings
= lan78xx_get_link_ksettings
,
1662 .set_link_ksettings
= lan78xx_set_link_ksettings
,
1663 .get_regs_len
= lan78xx_get_regs_len
,
1664 .get_regs
= lan78xx_get_regs
,
1667 static void lan78xx_init_mac_address(struct lan78xx_net
*dev
)
1669 u32 addr_lo
, addr_hi
;
1673 ret
= lan78xx_read_reg(dev
, RX_ADDRL
, &addr_lo
);
1674 ret
= lan78xx_read_reg(dev
, RX_ADDRH
, &addr_hi
);
1676 addr
[0] = addr_lo
& 0xFF;
1677 addr
[1] = (addr_lo
>> 8) & 0xFF;
1678 addr
[2] = (addr_lo
>> 16) & 0xFF;
1679 addr
[3] = (addr_lo
>> 24) & 0xFF;
1680 addr
[4] = addr_hi
& 0xFF;
1681 addr
[5] = (addr_hi
>> 8) & 0xFF;
1683 if (!is_valid_ether_addr(addr
)) {
1684 if (!eth_platform_get_mac_address(&dev
->udev
->dev
, addr
)) {
1685 /* valid address present in Device Tree */
1686 netif_dbg(dev
, ifup
, dev
->net
,
1687 "MAC address read from Device Tree");
1688 } else if (((lan78xx_read_eeprom(dev
, EEPROM_MAC_OFFSET
,
1689 ETH_ALEN
, addr
) == 0) ||
1690 (lan78xx_read_otp(dev
, EEPROM_MAC_OFFSET
,
1691 ETH_ALEN
, addr
) == 0)) &&
1692 is_valid_ether_addr(addr
)) {
1693 /* eeprom values are valid so use them */
1694 netif_dbg(dev
, ifup
, dev
->net
,
1695 "MAC address read from EEPROM");
1697 /* generate random MAC */
1698 eth_random_addr(addr
);
1699 netif_dbg(dev
, ifup
, dev
->net
,
1700 "MAC address set to random addr");
1703 addr_lo
= addr
[0] | (addr
[1] << 8) |
1704 (addr
[2] << 16) | (addr
[3] << 24);
1705 addr_hi
= addr
[4] | (addr
[5] << 8);
1707 ret
= lan78xx_write_reg(dev
, RX_ADDRL
, addr_lo
);
1708 ret
= lan78xx_write_reg(dev
, RX_ADDRH
, addr_hi
);
1711 ret
= lan78xx_write_reg(dev
, MAF_LO(0), addr_lo
);
1712 ret
= lan78xx_write_reg(dev
, MAF_HI(0), addr_hi
| MAF_HI_VALID_
);
1714 ether_addr_copy(dev
->net
->dev_addr
, addr
);
1717 /* MDIO read and write wrappers for phylib */
1718 static int lan78xx_mdiobus_read(struct mii_bus
*bus
, int phy_id
, int idx
)
1720 struct lan78xx_net
*dev
= bus
->priv
;
1724 ret
= usb_autopm_get_interface(dev
->intf
);
1728 mutex_lock(&dev
->phy_mutex
);
1730 /* confirm MII not busy */
1731 ret
= lan78xx_phy_wait_not_busy(dev
);
1735 /* set the address, index & direction (read from PHY) */
1736 addr
= mii_access(phy_id
, idx
, MII_READ
);
1737 ret
= lan78xx_write_reg(dev
, MII_ACC
, addr
);
1739 ret
= lan78xx_phy_wait_not_busy(dev
);
1743 ret
= lan78xx_read_reg(dev
, MII_DATA
, &val
);
1745 ret
= (int)(val
& 0xFFFF);
1748 mutex_unlock(&dev
->phy_mutex
);
1749 usb_autopm_put_interface(dev
->intf
);
1754 static int lan78xx_mdiobus_write(struct mii_bus
*bus
, int phy_id
, int idx
,
1757 struct lan78xx_net
*dev
= bus
->priv
;
1761 ret
= usb_autopm_get_interface(dev
->intf
);
1765 mutex_lock(&dev
->phy_mutex
);
1767 /* confirm MII not busy */
1768 ret
= lan78xx_phy_wait_not_busy(dev
);
1773 ret
= lan78xx_write_reg(dev
, MII_DATA
, val
);
1775 /* set the address, index & direction (write to PHY) */
1776 addr
= mii_access(phy_id
, idx
, MII_WRITE
);
1777 ret
= lan78xx_write_reg(dev
, MII_ACC
, addr
);
1779 ret
= lan78xx_phy_wait_not_busy(dev
);
1784 mutex_unlock(&dev
->phy_mutex
);
1785 usb_autopm_put_interface(dev
->intf
);
1789 static int lan78xx_mdio_init(struct lan78xx_net
*dev
)
1791 struct device_node
*node
;
1794 dev
->mdiobus
= mdiobus_alloc();
1795 if (!dev
->mdiobus
) {
1796 netdev_err(dev
->net
, "can't allocate MDIO bus\n");
1800 dev
->mdiobus
->priv
= (void *)dev
;
1801 dev
->mdiobus
->read
= lan78xx_mdiobus_read
;
1802 dev
->mdiobus
->write
= lan78xx_mdiobus_write
;
1803 dev
->mdiobus
->name
= "lan78xx-mdiobus";
1804 dev
->mdiobus
->parent
= &dev
->udev
->dev
;
1806 snprintf(dev
->mdiobus
->id
, MII_BUS_ID_SIZE
, "usb-%03d:%03d",
1807 dev
->udev
->bus
->busnum
, dev
->udev
->devnum
);
1809 switch (dev
->chipid
) {
1810 case ID_REV_CHIP_ID_7800_
:
1811 case ID_REV_CHIP_ID_7850_
:
1812 /* set to internal PHY id */
1813 dev
->mdiobus
->phy_mask
= ~(1 << 1);
1815 case ID_REV_CHIP_ID_7801_
:
1816 /* scan thru PHYAD[2..0] */
1817 dev
->mdiobus
->phy_mask
= ~(0xFF);
1821 node
= of_get_child_by_name(dev
->udev
->dev
.of_node
, "mdio");
1822 ret
= of_mdiobus_register(dev
->mdiobus
, node
);
1825 netdev_err(dev
->net
, "can't register MDIO bus\n");
1829 netdev_dbg(dev
->net
, "registered mdiobus bus %s\n", dev
->mdiobus
->id
);
1832 mdiobus_free(dev
->mdiobus
);
1836 static void lan78xx_remove_mdio(struct lan78xx_net
*dev
)
1838 mdiobus_unregister(dev
->mdiobus
);
1839 mdiobus_free(dev
->mdiobus
);
1842 static void lan78xx_link_status_change(struct net_device
*net
)
1844 struct phy_device
*phydev
= net
->phydev
;
1847 /* At forced 100 F/H mode, chip may fail to set mode correctly
1848 * when cable is switched between long(~50+m) and short one.
1849 * As workaround, set to 10 before setting to 100
1850 * at forced 100 F/H mode.
1852 if (!phydev
->autoneg
&& (phydev
->speed
== 100)) {
1853 /* disable phy interrupt */
1854 temp
= phy_read(phydev
, LAN88XX_INT_MASK
);
1855 temp
&= ~LAN88XX_INT_MASK_MDINTPIN_EN_
;
1856 ret
= phy_write(phydev
, LAN88XX_INT_MASK
, temp
);
1858 temp
= phy_read(phydev
, MII_BMCR
);
1859 temp
&= ~(BMCR_SPEED100
| BMCR_SPEED1000
);
1860 phy_write(phydev
, MII_BMCR
, temp
); /* set to 10 first */
1861 temp
|= BMCR_SPEED100
;
1862 phy_write(phydev
, MII_BMCR
, temp
); /* set to 100 later */
1864 /* clear pending interrupt generated while workaround */
1865 temp
= phy_read(phydev
, LAN88XX_INT_STS
);
1867 /* enable phy interrupt back */
1868 temp
= phy_read(phydev
, LAN88XX_INT_MASK
);
1869 temp
|= LAN88XX_INT_MASK_MDINTPIN_EN_
;
1870 ret
= phy_write(phydev
, LAN88XX_INT_MASK
, temp
);
1874 static int irq_map(struct irq_domain
*d
, unsigned int irq
,
1875 irq_hw_number_t hwirq
)
1877 struct irq_domain_data
*data
= d
->host_data
;
1879 irq_set_chip_data(irq
, data
);
1880 irq_set_chip_and_handler(irq
, data
->irqchip
, data
->irq_handler
);
1881 irq_set_noprobe(irq
);
1886 static void irq_unmap(struct irq_domain
*d
, unsigned int irq
)
1888 irq_set_chip_and_handler(irq
, NULL
, NULL
);
1889 irq_set_chip_data(irq
, NULL
);
1892 static const struct irq_domain_ops chip_domain_ops
= {
1897 static void lan78xx_irq_mask(struct irq_data
*irqd
)
1899 struct irq_domain_data
*data
= irq_data_get_irq_chip_data(irqd
);
1901 data
->irqenable
&= ~BIT(irqd_to_hwirq(irqd
));
1904 static void lan78xx_irq_unmask(struct irq_data
*irqd
)
1906 struct irq_domain_data
*data
= irq_data_get_irq_chip_data(irqd
);
1908 data
->irqenable
|= BIT(irqd_to_hwirq(irqd
));
1911 static void lan78xx_irq_bus_lock(struct irq_data
*irqd
)
1913 struct irq_domain_data
*data
= irq_data_get_irq_chip_data(irqd
);
1915 mutex_lock(&data
->irq_lock
);
1918 static void lan78xx_irq_bus_sync_unlock(struct irq_data
*irqd
)
1920 struct irq_domain_data
*data
= irq_data_get_irq_chip_data(irqd
);
1921 struct lan78xx_net
*dev
=
1922 container_of(data
, struct lan78xx_net
, domain_data
);
1926 /* call register access here because irq_bus_lock & irq_bus_sync_unlock
1927 * are only two callbacks executed in non-atomic contex.
1929 ret
= lan78xx_read_reg(dev
, INT_EP_CTL
, &buf
);
1930 if (buf
!= data
->irqenable
)
1931 ret
= lan78xx_write_reg(dev
, INT_EP_CTL
, data
->irqenable
);
1933 mutex_unlock(&data
->irq_lock
);
1936 static struct irq_chip lan78xx_irqchip
= {
1937 .name
= "lan78xx-irqs",
1938 .irq_mask
= lan78xx_irq_mask
,
1939 .irq_unmask
= lan78xx_irq_unmask
,
1940 .irq_bus_lock
= lan78xx_irq_bus_lock
,
1941 .irq_bus_sync_unlock
= lan78xx_irq_bus_sync_unlock
,
1944 static int lan78xx_setup_irq_domain(struct lan78xx_net
*dev
)
1946 struct device_node
*of_node
;
1947 struct irq_domain
*irqdomain
;
1948 unsigned int irqmap
= 0;
1952 of_node
= dev
->udev
->dev
.parent
->of_node
;
1954 mutex_init(&dev
->domain_data
.irq_lock
);
1956 lan78xx_read_reg(dev
, INT_EP_CTL
, &buf
);
1957 dev
->domain_data
.irqenable
= buf
;
1959 dev
->domain_data
.irqchip
= &lan78xx_irqchip
;
1960 dev
->domain_data
.irq_handler
= handle_simple_irq
;
1962 irqdomain
= irq_domain_add_simple(of_node
, MAX_INT_EP
, 0,
1963 &chip_domain_ops
, &dev
->domain_data
);
1965 /* create mapping for PHY interrupt */
1966 irqmap
= irq_create_mapping(irqdomain
, INT_EP_PHY
);
1968 irq_domain_remove(irqdomain
);
1977 dev
->domain_data
.irqdomain
= irqdomain
;
1978 dev
->domain_data
.phyirq
= irqmap
;
1983 static void lan78xx_remove_irq_domain(struct lan78xx_net
*dev
)
1985 if (dev
->domain_data
.phyirq
> 0) {
1986 irq_dispose_mapping(dev
->domain_data
.phyirq
);
1988 if (dev
->domain_data
.irqdomain
)
1989 irq_domain_remove(dev
->domain_data
.irqdomain
);
1991 dev
->domain_data
.phyirq
= 0;
1992 dev
->domain_data
.irqdomain
= NULL
;
1995 static int lan8835_fixup(struct phy_device
*phydev
)
1999 struct lan78xx_net
*dev
= netdev_priv(phydev
->attached_dev
);
2001 /* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
2002 buf
= phy_read_mmd(phydev
, MDIO_MMD_PCS
, 0x8010);
2005 phy_write_mmd(phydev
, MDIO_MMD_PCS
, 0x8010, buf
);
2007 /* RGMII MAC TXC Delay Enable */
2008 ret
= lan78xx_write_reg(dev
, MAC_RGMII_ID
,
2009 MAC_RGMII_ID_TXC_DELAY_EN_
);
2011 /* RGMII TX DLL Tune Adjust */
2012 ret
= lan78xx_write_reg(dev
, RGMII_TX_BYP_DLL
, 0x3D00);
2014 dev
->interface
= PHY_INTERFACE_MODE_RGMII_TXID
;
2019 static int ksz9031rnx_fixup(struct phy_device
*phydev
)
2021 struct lan78xx_net
*dev
= netdev_priv(phydev
->attached_dev
);
2023 /* Micrel9301RNX PHY configuration */
2024 /* RGMII Control Signal Pad Skew */
2025 phy_write_mmd(phydev
, MDIO_MMD_WIS
, 4, 0x0077);
2026 /* RGMII RX Data Pad Skew */
2027 phy_write_mmd(phydev
, MDIO_MMD_WIS
, 5, 0x7777);
2028 /* RGMII RX Clock Pad Skew */
2029 phy_write_mmd(phydev
, MDIO_MMD_WIS
, 8, 0x1FF);
2031 dev
->interface
= PHY_INTERFACE_MODE_RGMII_RXID
;
2036 static struct phy_device
*lan7801_phy_init(struct lan78xx_net
*dev
)
2040 struct fixed_phy_status fphy_status
= {
2042 .speed
= SPEED_1000
,
2043 .duplex
= DUPLEX_FULL
,
2045 struct phy_device
*phydev
;
2047 phydev
= phy_find_first(dev
->mdiobus
);
2049 netdev_dbg(dev
->net
, "PHY Not Found!! Registering Fixed PHY\n");
2050 phydev
= fixed_phy_register(PHY_POLL
, &fphy_status
, NULL
);
2051 if (IS_ERR(phydev
)) {
2052 netdev_err(dev
->net
, "No PHY/fixed_PHY found\n");
2055 netdev_dbg(dev
->net
, "Registered FIXED PHY\n");
2056 dev
->interface
= PHY_INTERFACE_MODE_RGMII
;
2057 ret
= lan78xx_write_reg(dev
, MAC_RGMII_ID
,
2058 MAC_RGMII_ID_TXC_DELAY_EN_
);
2059 ret
= lan78xx_write_reg(dev
, RGMII_TX_BYP_DLL
, 0x3D00);
2060 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
2061 buf
|= HW_CFG_CLK125_EN_
;
2062 buf
|= HW_CFG_REFCLK25_EN_
;
2063 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
2066 netdev_err(dev
->net
, "no PHY driver found\n");
2069 dev
->interface
= PHY_INTERFACE_MODE_RGMII
;
2070 /* external PHY fixup for KSZ9031RNX */
2071 ret
= phy_register_fixup_for_uid(PHY_KSZ9031RNX
, 0xfffffff0,
2074 netdev_err(dev
->net
, "Failed to register fixup for PHY_KSZ9031RNX\n");
2077 /* external PHY fixup for LAN8835 */
2078 ret
= phy_register_fixup_for_uid(PHY_LAN8835
, 0xfffffff0,
2081 netdev_err(dev
->net
, "Failed to register fixup for PHY_LAN8835\n");
2084 /* add more external PHY fixup here if needed */
2086 phydev
->is_internal
= false;
2091 static int lan78xx_phy_init(struct lan78xx_net
*dev
)
2093 __ETHTOOL_DECLARE_LINK_MODE_MASK(fc
) = { 0, };
2096 struct phy_device
*phydev
;
2098 switch (dev
->chipid
) {
2099 case ID_REV_CHIP_ID_7801_
:
2100 phydev
= lan7801_phy_init(dev
);
2102 netdev_err(dev
->net
, "lan7801: PHY Init Failed");
2107 case ID_REV_CHIP_ID_7800_
:
2108 case ID_REV_CHIP_ID_7850_
:
2109 phydev
= phy_find_first(dev
->mdiobus
);
2111 netdev_err(dev
->net
, "no PHY found\n");
2114 phydev
->is_internal
= true;
2115 dev
->interface
= PHY_INTERFACE_MODE_GMII
;
2119 netdev_err(dev
->net
, "Unknown CHIP ID found\n");
2123 /* if phyirq is not set, use polling mode in phylib */
2124 if (dev
->domain_data
.phyirq
> 0)
2125 phydev
->irq
= dev
->domain_data
.phyirq
;
2128 netdev_dbg(dev
->net
, "phydev->irq = %d\n", phydev
->irq
);
2130 /* set to AUTOMDIX */
2131 phydev
->mdix
= ETH_TP_MDI_AUTO
;
2133 ret
= phy_connect_direct(dev
->net
, phydev
,
2134 lan78xx_link_status_change
,
2137 netdev_err(dev
->net
, "can't attach PHY to %s\n",
2139 if (dev
->chipid
== ID_REV_CHIP_ID_7801_
) {
2140 if (phy_is_pseudo_fixed_link(phydev
)) {
2141 fixed_phy_unregister(phydev
);
2143 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX
,
2145 phy_unregister_fixup_for_uid(PHY_LAN8835
,
2152 /* MAC doesn't support 1000T Half */
2153 phy_remove_link_mode(phydev
, ETHTOOL_LINK_MODE_1000baseT_Half_BIT
);
2155 /* support both flow controls */
2156 dev
->fc_request_control
= (FLOW_CTRL_RX
| FLOW_CTRL_TX
);
2157 linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT
,
2158 phydev
->advertising
);
2159 linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT
,
2160 phydev
->advertising
);
2161 mii_adv
= (u32
)mii_advertise_flowctrl(dev
->fc_request_control
);
2162 mii_adv_to_linkmode_adv_t(fc
, mii_adv
);
2163 linkmode_or(phydev
->advertising
, fc
, phydev
->advertising
);
2165 if (phydev
->mdio
.dev
.of_node
) {
2169 len
= of_property_count_elems_of_size(phydev
->mdio
.dev
.of_node
,
2170 "microchip,led-modes",
2173 /* Ensure the appropriate LEDs are enabled */
2174 lan78xx_read_reg(dev
, HW_CFG
, ®
);
2175 reg
&= ~(HW_CFG_LED0_EN_
|
2179 reg
|= (len
> 0) * HW_CFG_LED0_EN_
|
2180 (len
> 1) * HW_CFG_LED1_EN_
|
2181 (len
> 2) * HW_CFG_LED2_EN_
|
2182 (len
> 3) * HW_CFG_LED3_EN_
;
2183 lan78xx_write_reg(dev
, HW_CFG
, reg
);
2187 genphy_config_aneg(phydev
);
2189 dev
->fc_autoneg
= phydev
->autoneg
;
2194 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net
*dev
, int size
)
2200 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
2202 rxenabled
= ((buf
& MAC_RX_RXEN_
) != 0);
2205 buf
&= ~MAC_RX_RXEN_
;
2206 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
2209 /* add 4 to size for FCS */
2210 buf
&= ~MAC_RX_MAX_SIZE_MASK_
;
2211 buf
|= (((size
+ 4) << MAC_RX_MAX_SIZE_SHIFT_
) & MAC_RX_MAX_SIZE_MASK_
);
2213 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
2216 buf
|= MAC_RX_RXEN_
;
2217 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
2223 static int unlink_urbs(struct lan78xx_net
*dev
, struct sk_buff_head
*q
)
2225 struct sk_buff
*skb
;
2226 unsigned long flags
;
2229 spin_lock_irqsave(&q
->lock
, flags
);
2230 while (!skb_queue_empty(q
)) {
2231 struct skb_data
*entry
;
2235 skb_queue_walk(q
, skb
) {
2236 entry
= (struct skb_data
*)skb
->cb
;
2237 if (entry
->state
!= unlink_start
)
2242 entry
->state
= unlink_start
;
2245 /* Get reference count of the URB to avoid it to be
2246 * freed during usb_unlink_urb, which may trigger
2247 * use-after-free problem inside usb_unlink_urb since
2248 * usb_unlink_urb is always racing with .complete
2249 * handler(include defer_bh).
2252 spin_unlock_irqrestore(&q
->lock
, flags
);
2253 /* during some PM-driven resume scenarios,
2254 * these (async) unlinks complete immediately
2256 ret
= usb_unlink_urb(urb
);
2257 if (ret
!= -EINPROGRESS
&& ret
!= 0)
2258 netdev_dbg(dev
->net
, "unlink urb err, %d\n", ret
);
2262 spin_lock_irqsave(&q
->lock
, flags
);
2264 spin_unlock_irqrestore(&q
->lock
, flags
);
2268 static int lan78xx_change_mtu(struct net_device
*netdev
, int new_mtu
)
2270 struct lan78xx_net
*dev
= netdev_priv(netdev
);
2271 int ll_mtu
= new_mtu
+ netdev
->hard_header_len
;
2272 int old_hard_mtu
= dev
->hard_mtu
;
2273 int old_rx_urb_size
= dev
->rx_urb_size
;
2276 /* no second zero-length packet read wanted after mtu-sized packets */
2277 if ((ll_mtu
% dev
->maxpacket
) == 0)
2280 ret
= lan78xx_set_rx_max_frame_length(dev
, new_mtu
+ VLAN_ETH_HLEN
);
2282 netdev
->mtu
= new_mtu
;
2284 dev
->hard_mtu
= netdev
->mtu
+ netdev
->hard_header_len
;
2285 if (dev
->rx_urb_size
== old_hard_mtu
) {
2286 dev
->rx_urb_size
= dev
->hard_mtu
;
2287 if (dev
->rx_urb_size
> old_rx_urb_size
) {
2288 if (netif_running(dev
->net
)) {
2289 unlink_urbs(dev
, &dev
->rxq
);
2290 tasklet_schedule(&dev
->bh
);
2298 static int lan78xx_set_mac_addr(struct net_device
*netdev
, void *p
)
2300 struct lan78xx_net
*dev
= netdev_priv(netdev
);
2301 struct sockaddr
*addr
= p
;
2302 u32 addr_lo
, addr_hi
;
2305 if (netif_running(netdev
))
2308 if (!is_valid_ether_addr(addr
->sa_data
))
2309 return -EADDRNOTAVAIL
;
2311 ether_addr_copy(netdev
->dev_addr
, addr
->sa_data
);
2313 addr_lo
= netdev
->dev_addr
[0] |
2314 netdev
->dev_addr
[1] << 8 |
2315 netdev
->dev_addr
[2] << 16 |
2316 netdev
->dev_addr
[3] << 24;
2317 addr_hi
= netdev
->dev_addr
[4] |
2318 netdev
->dev_addr
[5] << 8;
2320 ret
= lan78xx_write_reg(dev
, RX_ADDRL
, addr_lo
);
2321 ret
= lan78xx_write_reg(dev
, RX_ADDRH
, addr_hi
);
2323 /* Added to support MAC address changes */
2324 ret
= lan78xx_write_reg(dev
, MAF_LO(0), addr_lo
);
2325 ret
= lan78xx_write_reg(dev
, MAF_HI(0), addr_hi
| MAF_HI_VALID_
);
2330 /* Enable or disable Rx checksum offload engine */
2331 static int lan78xx_set_features(struct net_device
*netdev
,
2332 netdev_features_t features
)
2334 struct lan78xx_net
*dev
= netdev_priv(netdev
);
2335 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2336 unsigned long flags
;
2339 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
2341 if (features
& NETIF_F_RXCSUM
) {
2342 pdata
->rfe_ctl
|= RFE_CTL_TCPUDP_COE_
| RFE_CTL_IP_COE_
;
2343 pdata
->rfe_ctl
|= RFE_CTL_ICMP_COE_
| RFE_CTL_IGMP_COE_
;
2345 pdata
->rfe_ctl
&= ~(RFE_CTL_TCPUDP_COE_
| RFE_CTL_IP_COE_
);
2346 pdata
->rfe_ctl
&= ~(RFE_CTL_ICMP_COE_
| RFE_CTL_IGMP_COE_
);
2349 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
2350 pdata
->rfe_ctl
|= RFE_CTL_VLAN_STRIP_
;
2352 pdata
->rfe_ctl
&= ~RFE_CTL_VLAN_STRIP_
;
2354 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
2355 pdata
->rfe_ctl
|= RFE_CTL_VLAN_FILTER_
;
2357 pdata
->rfe_ctl
&= ~RFE_CTL_VLAN_FILTER_
;
2359 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
2361 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
2366 static void lan78xx_deferred_vlan_write(struct work_struct
*param
)
2368 struct lan78xx_priv
*pdata
=
2369 container_of(param
, struct lan78xx_priv
, set_vlan
);
2370 struct lan78xx_net
*dev
= pdata
->dev
;
2372 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, 0,
2373 DP_SEL_VHF_VLAN_LEN
, pdata
->vlan_table
);
2376 static int lan78xx_vlan_rx_add_vid(struct net_device
*netdev
,
2377 __be16 proto
, u16 vid
)
2379 struct lan78xx_net
*dev
= netdev_priv(netdev
);
2380 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2382 u16 vid_dword_index
;
2384 vid_dword_index
= (vid
>> 5) & 0x7F;
2385 vid_bit_index
= vid
& 0x1F;
2387 pdata
->vlan_table
[vid_dword_index
] |= (1 << vid_bit_index
);
2389 /* defer register writes to a sleepable context */
2390 schedule_work(&pdata
->set_vlan
);
2395 static int lan78xx_vlan_rx_kill_vid(struct net_device
*netdev
,
2396 __be16 proto
, u16 vid
)
2398 struct lan78xx_net
*dev
= netdev_priv(netdev
);
2399 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2401 u16 vid_dword_index
;
2403 vid_dword_index
= (vid
>> 5) & 0x7F;
2404 vid_bit_index
= vid
& 0x1F;
2406 pdata
->vlan_table
[vid_dword_index
] &= ~(1 << vid_bit_index
);
2408 /* defer register writes to a sleepable context */
2409 schedule_work(&pdata
->set_vlan
);
2414 static void lan78xx_init_ltm(struct lan78xx_net
*dev
)
2418 u32 regs
[6] = { 0 };
2420 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
2421 if (buf
& USB_CFG1_LTM_ENABLE_
) {
2423 /* Get values from EEPROM first */
2424 if (lan78xx_read_eeprom(dev
, 0x3F, 2, temp
) == 0) {
2425 if (temp
[0] == 24) {
2426 ret
= lan78xx_read_raw_eeprom(dev
,
2433 } else if (lan78xx_read_otp(dev
, 0x3F, 2, temp
) == 0) {
2434 if (temp
[0] == 24) {
2435 ret
= lan78xx_read_raw_otp(dev
,
2445 lan78xx_write_reg(dev
, LTM_BELT_IDLE0
, regs
[0]);
2446 lan78xx_write_reg(dev
, LTM_BELT_IDLE1
, regs
[1]);
2447 lan78xx_write_reg(dev
, LTM_BELT_ACT0
, regs
[2]);
2448 lan78xx_write_reg(dev
, LTM_BELT_ACT1
, regs
[3]);
2449 lan78xx_write_reg(dev
, LTM_INACTIVE0
, regs
[4]);
2450 lan78xx_write_reg(dev
, LTM_INACTIVE1
, regs
[5]);
2453 static int lan78xx_reset(struct lan78xx_net
*dev
)
2455 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2458 unsigned long timeout
;
2461 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
2462 buf
|= HW_CFG_LRST_
;
2463 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
2465 timeout
= jiffies
+ HZ
;
2468 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
2469 if (time_after(jiffies
, timeout
)) {
2470 netdev_warn(dev
->net
,
2471 "timeout on completion of LiteReset");
2474 } while (buf
& HW_CFG_LRST_
);
2476 lan78xx_init_mac_address(dev
);
2478 /* save DEVID for later usage */
2479 ret
= lan78xx_read_reg(dev
, ID_REV
, &buf
);
2480 dev
->chipid
= (buf
& ID_REV_CHIP_ID_MASK_
) >> 16;
2481 dev
->chiprev
= buf
& ID_REV_CHIP_REV_MASK_
;
2483 /* Respond to the IN token with a NAK */
2484 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
2485 buf
|= USB_CFG_BIR_
;
2486 ret
= lan78xx_write_reg(dev
, USB_CFG0
, buf
);
2489 lan78xx_init_ltm(dev
);
2491 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
2492 buf
= DEFAULT_BURST_CAP_SIZE
/ SS_USB_PKT_SIZE
;
2493 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
2496 } else if (dev
->udev
->speed
== USB_SPEED_HIGH
) {
2497 buf
= DEFAULT_BURST_CAP_SIZE
/ HS_USB_PKT_SIZE
;
2498 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
2499 dev
->rx_qlen
= RX_MAX_QUEUE_MEMORY
/ dev
->rx_urb_size
;
2500 dev
->tx_qlen
= RX_MAX_QUEUE_MEMORY
/ dev
->hard_mtu
;
2502 buf
= DEFAULT_BURST_CAP_SIZE
/ FS_USB_PKT_SIZE
;
2503 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
2508 ret
= lan78xx_write_reg(dev
, BURST_CAP
, buf
);
2509 ret
= lan78xx_write_reg(dev
, BULK_IN_DLY
, DEFAULT_BULK_IN_DELAY
);
2511 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
2513 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
2515 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
2516 buf
|= USB_CFG_BCE_
;
2517 ret
= lan78xx_write_reg(dev
, USB_CFG0
, buf
);
2519 /* set FIFO sizes */
2520 buf
= (MAX_RX_FIFO_SIZE
- 512) / 512;
2521 ret
= lan78xx_write_reg(dev
, FCT_RX_FIFO_END
, buf
);
2523 buf
= (MAX_TX_FIFO_SIZE
- 512) / 512;
2524 ret
= lan78xx_write_reg(dev
, FCT_TX_FIFO_END
, buf
);
2526 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_CLEAR_ALL_
);
2527 ret
= lan78xx_write_reg(dev
, FLOW
, 0);
2528 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, 0);
2530 /* Don't need rfe_ctl_lock during initialisation */
2531 ret
= lan78xx_read_reg(dev
, RFE_CTL
, &pdata
->rfe_ctl
);
2532 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
| RFE_CTL_DA_PERFECT_
;
2533 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
2535 /* Enable or disable checksum offload engines */
2536 lan78xx_set_features(dev
->net
, dev
->net
->features
);
2538 lan78xx_set_multicast(dev
->net
);
2541 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
2542 buf
|= PMT_CTL_PHY_RST_
;
2543 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
2545 timeout
= jiffies
+ HZ
;
2548 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
2549 if (time_after(jiffies
, timeout
)) {
2550 netdev_warn(dev
->net
, "timeout waiting for PHY Reset");
2553 } while ((buf
& PMT_CTL_PHY_RST_
) || !(buf
& PMT_CTL_READY_
));
2555 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
2556 /* LAN7801 only has RGMII mode */
2557 if (dev
->chipid
== ID_REV_CHIP_ID_7801_
)
2558 buf
&= ~MAC_CR_GMII_EN_
;
2560 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
) {
2561 ret
= lan78xx_read_raw_eeprom(dev
, 0, 1, &sig
);
2562 if (!ret
&& sig
!= EEPROM_INDICATOR
) {
2563 /* Implies there is no external eeprom. Set mac speed */
2564 netdev_info(dev
->net
, "No External EEPROM. Setting MAC Speed\n");
2565 buf
|= MAC_CR_AUTO_DUPLEX_
| MAC_CR_AUTO_SPEED_
;
2568 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
2570 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
2571 buf
|= MAC_TX_TXEN_
;
2572 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
2574 ret
= lan78xx_read_reg(dev
, FCT_TX_CTL
, &buf
);
2575 buf
|= FCT_TX_CTL_EN_
;
2576 ret
= lan78xx_write_reg(dev
, FCT_TX_CTL
, buf
);
2578 ret
= lan78xx_set_rx_max_frame_length(dev
,
2579 dev
->net
->mtu
+ VLAN_ETH_HLEN
);
2581 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
2582 buf
|= MAC_RX_RXEN_
;
2583 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
2585 ret
= lan78xx_read_reg(dev
, FCT_RX_CTL
, &buf
);
2586 buf
|= FCT_RX_CTL_EN_
;
2587 ret
= lan78xx_write_reg(dev
, FCT_RX_CTL
, buf
);
2592 static void lan78xx_init_stats(struct lan78xx_net
*dev
)
2597 /* initialize for stats update
2598 * some counters are 20bits and some are 32bits
2600 p
= (u32
*)&dev
->stats
.rollover_max
;
2601 for (i
= 0; i
< (sizeof(dev
->stats
.rollover_max
) / (sizeof(u32
))); i
++)
2604 dev
->stats
.rollover_max
.rx_unicast_byte_count
= 0xFFFFFFFF;
2605 dev
->stats
.rollover_max
.rx_broadcast_byte_count
= 0xFFFFFFFF;
2606 dev
->stats
.rollover_max
.rx_multicast_byte_count
= 0xFFFFFFFF;
2607 dev
->stats
.rollover_max
.eee_rx_lpi_transitions
= 0xFFFFFFFF;
2608 dev
->stats
.rollover_max
.eee_rx_lpi_time
= 0xFFFFFFFF;
2609 dev
->stats
.rollover_max
.tx_unicast_byte_count
= 0xFFFFFFFF;
2610 dev
->stats
.rollover_max
.tx_broadcast_byte_count
= 0xFFFFFFFF;
2611 dev
->stats
.rollover_max
.tx_multicast_byte_count
= 0xFFFFFFFF;
2612 dev
->stats
.rollover_max
.eee_tx_lpi_transitions
= 0xFFFFFFFF;
2613 dev
->stats
.rollover_max
.eee_tx_lpi_time
= 0xFFFFFFFF;
2615 set_bit(EVENT_STAT_UPDATE
, &dev
->flags
);
2618 static int lan78xx_open(struct net_device
*net
)
2620 struct lan78xx_net
*dev
= netdev_priv(net
);
2623 ret
= usb_autopm_get_interface(dev
->intf
);
2627 phy_start(net
->phydev
);
2629 netif_dbg(dev
, ifup
, dev
->net
, "phy initialised successfully");
2631 /* for Link Check */
2632 if (dev
->urb_intr
) {
2633 ret
= usb_submit_urb(dev
->urb_intr
, GFP_KERNEL
);
2635 netif_err(dev
, ifup
, dev
->net
,
2636 "intr submit %d\n", ret
);
2641 lan78xx_init_stats(dev
);
2643 set_bit(EVENT_DEV_OPEN
, &dev
->flags
);
2645 netif_start_queue(net
);
2647 dev
->link_on
= false;
2649 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
2651 usb_autopm_put_interface(dev
->intf
);
2657 static void lan78xx_terminate_urbs(struct lan78xx_net
*dev
)
2659 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup
);
2660 DECLARE_WAITQUEUE(wait
, current
);
2663 /* ensure there are no more active urbs */
2664 add_wait_queue(&unlink_wakeup
, &wait
);
2665 set_current_state(TASK_UNINTERRUPTIBLE
);
2666 dev
->wait
= &unlink_wakeup
;
2667 temp
= unlink_urbs(dev
, &dev
->txq
) + unlink_urbs(dev
, &dev
->rxq
);
2669 /* maybe wait for deletions to finish. */
2670 while (!skb_queue_empty(&dev
->rxq
) &&
2671 !skb_queue_empty(&dev
->txq
) &&
2672 !skb_queue_empty(&dev
->done
)) {
2673 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS
));
2674 set_current_state(TASK_UNINTERRUPTIBLE
);
2675 netif_dbg(dev
, ifdown
, dev
->net
,
2676 "waited for %d urb completions\n", temp
);
2678 set_current_state(TASK_RUNNING
);
2680 remove_wait_queue(&unlink_wakeup
, &wait
);
2683 static int lan78xx_stop(struct net_device
*net
)
2685 struct lan78xx_net
*dev
= netdev_priv(net
);
2687 if (timer_pending(&dev
->stat_monitor
))
2688 del_timer_sync(&dev
->stat_monitor
);
2691 phy_stop(net
->phydev
);
2693 clear_bit(EVENT_DEV_OPEN
, &dev
->flags
);
2694 netif_stop_queue(net
);
2696 netif_info(dev
, ifdown
, dev
->net
,
2697 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2698 net
->stats
.rx_packets
, net
->stats
.tx_packets
,
2699 net
->stats
.rx_errors
, net
->stats
.tx_errors
);
2701 lan78xx_terminate_urbs(dev
);
2703 usb_kill_urb(dev
->urb_intr
);
2705 skb_queue_purge(&dev
->rxq_pause
);
2707 /* deferred work (task, timer, softirq) must also stop.
2708 * can't flush_scheduled_work() until we drop rtnl (later),
2709 * else workers could deadlock; so make workers a NOP.
2712 cancel_delayed_work_sync(&dev
->wq
);
2713 tasklet_kill(&dev
->bh
);
2715 usb_autopm_put_interface(dev
->intf
);
2720 static struct sk_buff
*lan78xx_tx_prep(struct lan78xx_net
*dev
,
2721 struct sk_buff
*skb
, gfp_t flags
)
2723 u32 tx_cmd_a
, tx_cmd_b
;
2726 if (skb_cow_head(skb
, TX_OVERHEAD
)) {
2727 dev_kfree_skb_any(skb
);
2731 if (skb_linearize(skb
)) {
2732 dev_kfree_skb_any(skb
);
2736 tx_cmd_a
= (u32
)(skb
->len
& TX_CMD_A_LEN_MASK_
) | TX_CMD_A_FCS_
;
2738 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2739 tx_cmd_a
|= TX_CMD_A_IPE_
| TX_CMD_A_TPE_
;
2742 if (skb_is_gso(skb
)) {
2743 u16 mss
= max(skb_shinfo(skb
)->gso_size
, TX_CMD_B_MSS_MIN_
);
2745 tx_cmd_b
= (mss
<< TX_CMD_B_MSS_SHIFT_
) & TX_CMD_B_MSS_MASK_
;
2747 tx_cmd_a
|= TX_CMD_A_LSO_
;
2750 if (skb_vlan_tag_present(skb
)) {
2751 tx_cmd_a
|= TX_CMD_A_IVTG_
;
2752 tx_cmd_b
|= skb_vlan_tag_get(skb
) & TX_CMD_B_VTAG_MASK_
;
2755 ptr
= skb_push(skb
, 8);
2756 put_unaligned_le32(tx_cmd_a
, ptr
);
2757 put_unaligned_le32(tx_cmd_b
, ptr
+ 4);
2762 static enum skb_state
defer_bh(struct lan78xx_net
*dev
, struct sk_buff
*skb
,
2763 struct sk_buff_head
*list
, enum skb_state state
)
2765 unsigned long flags
;
2766 enum skb_state old_state
;
2767 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2769 spin_lock_irqsave(&list
->lock
, flags
);
2770 old_state
= entry
->state
;
2771 entry
->state
= state
;
2773 __skb_unlink(skb
, list
);
2774 spin_unlock(&list
->lock
);
2775 spin_lock(&dev
->done
.lock
);
2777 __skb_queue_tail(&dev
->done
, skb
);
2778 if (skb_queue_len(&dev
->done
) == 1)
2779 tasklet_schedule(&dev
->bh
);
2780 spin_unlock_irqrestore(&dev
->done
.lock
, flags
);
2785 static void tx_complete(struct urb
*urb
)
2787 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
2788 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2789 struct lan78xx_net
*dev
= entry
->dev
;
2791 if (urb
->status
== 0) {
2792 dev
->net
->stats
.tx_packets
+= entry
->num_of_packet
;
2793 dev
->net
->stats
.tx_bytes
+= entry
->length
;
2795 dev
->net
->stats
.tx_errors
++;
2797 switch (urb
->status
) {
2799 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
2802 /* software-driven interface shutdown */
2810 netif_stop_queue(dev
->net
);
2813 netif_dbg(dev
, tx_err
, dev
->net
,
2814 "tx err %d\n", entry
->urb
->status
);
2819 usb_autopm_put_interface_async(dev
->intf
);
2821 defer_bh(dev
, skb
, &dev
->txq
, tx_done
);
2824 static void lan78xx_queue_skb(struct sk_buff_head
*list
,
2825 struct sk_buff
*newsk
, enum skb_state state
)
2827 struct skb_data
*entry
= (struct skb_data
*)newsk
->cb
;
2829 __skb_queue_tail(list
, newsk
);
2830 entry
->state
= state
;
2834 lan78xx_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
2836 struct lan78xx_net
*dev
= netdev_priv(net
);
2837 struct sk_buff
*skb2
= NULL
;
2840 skb_tx_timestamp(skb
);
2841 skb2
= lan78xx_tx_prep(dev
, skb
, GFP_ATOMIC
);
2845 skb_queue_tail(&dev
->txq_pend
, skb2
);
2847 /* throttle TX patch at slower than SUPER SPEED USB */
2848 if ((dev
->udev
->speed
< USB_SPEED_SUPER
) &&
2849 (skb_queue_len(&dev
->txq_pend
) > 10))
2850 netif_stop_queue(net
);
2852 netif_dbg(dev
, tx_err
, dev
->net
,
2853 "lan78xx_tx_prep return NULL\n");
2854 dev
->net
->stats
.tx_errors
++;
2855 dev
->net
->stats
.tx_dropped
++;
2858 tasklet_schedule(&dev
->bh
);
2860 return NETDEV_TX_OK
;
2864 lan78xx_get_endpoints(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2867 struct usb_host_interface
*alt
= NULL
;
2868 struct usb_host_endpoint
*in
= NULL
, *out
= NULL
;
2869 struct usb_host_endpoint
*status
= NULL
;
2871 for (tmp
= 0; tmp
< intf
->num_altsetting
; tmp
++) {
2877 alt
= intf
->altsetting
+ tmp
;
2879 for (ep
= 0; ep
< alt
->desc
.bNumEndpoints
; ep
++) {
2880 struct usb_host_endpoint
*e
;
2883 e
= alt
->endpoint
+ ep
;
2884 switch (e
->desc
.bmAttributes
) {
2885 case USB_ENDPOINT_XFER_INT
:
2886 if (!usb_endpoint_dir_in(&e
->desc
))
2890 case USB_ENDPOINT_XFER_BULK
:
2895 if (usb_endpoint_dir_in(&e
->desc
)) {
2898 else if (intr
&& !status
)
2908 if (!alt
|| !in
|| !out
)
2911 dev
->pipe_in
= usb_rcvbulkpipe(dev
->udev
,
2912 in
->desc
.bEndpointAddress
&
2913 USB_ENDPOINT_NUMBER_MASK
);
2914 dev
->pipe_out
= usb_sndbulkpipe(dev
->udev
,
2915 out
->desc
.bEndpointAddress
&
2916 USB_ENDPOINT_NUMBER_MASK
);
2917 dev
->ep_intr
= status
;
2922 static int lan78xx_bind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2924 struct lan78xx_priv
*pdata
= NULL
;
2928 ret
= lan78xx_get_endpoints(dev
, intf
);
2930 netdev_warn(dev
->net
, "lan78xx_get_endpoints failed: %d\n",
2935 dev
->data
[0] = (unsigned long)kzalloc(sizeof(*pdata
), GFP_KERNEL
);
2937 pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2939 netdev_warn(dev
->net
, "Unable to allocate lan78xx_priv");
2945 spin_lock_init(&pdata
->rfe_ctl_lock
);
2946 mutex_init(&pdata
->dataport_mutex
);
2948 INIT_WORK(&pdata
->set_multicast
, lan78xx_deferred_multicast_write
);
2950 for (i
= 0; i
< DP_SEL_VHF_VLAN_LEN
; i
++)
2951 pdata
->vlan_table
[i
] = 0;
2953 INIT_WORK(&pdata
->set_vlan
, lan78xx_deferred_vlan_write
);
2955 dev
->net
->features
= 0;
2957 if (DEFAULT_TX_CSUM_ENABLE
)
2958 dev
->net
->features
|= NETIF_F_HW_CSUM
;
2960 if (DEFAULT_RX_CSUM_ENABLE
)
2961 dev
->net
->features
|= NETIF_F_RXCSUM
;
2963 if (DEFAULT_TSO_CSUM_ENABLE
)
2964 dev
->net
->features
|= NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_SG
;
2966 if (DEFAULT_VLAN_RX_OFFLOAD
)
2967 dev
->net
->features
|= NETIF_F_HW_VLAN_CTAG_RX
;
2969 if (DEFAULT_VLAN_FILTER_ENABLE
)
2970 dev
->net
->features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
2972 dev
->net
->hw_features
= dev
->net
->features
;
2974 ret
= lan78xx_setup_irq_domain(dev
);
2976 netdev_warn(dev
->net
,
2977 "lan78xx_setup_irq_domain() failed : %d", ret
);
2981 dev
->net
->hard_header_len
+= TX_OVERHEAD
;
2982 dev
->hard_mtu
= dev
->net
->mtu
+ dev
->net
->hard_header_len
;
2984 /* Init all registers */
2985 ret
= lan78xx_reset(dev
);
2987 netdev_warn(dev
->net
, "Registers INIT FAILED....");
2991 ret
= lan78xx_mdio_init(dev
);
2993 netdev_warn(dev
->net
, "MDIO INIT FAILED.....");
2997 dev
->net
->flags
|= IFF_MULTICAST
;
2999 pdata
->wol
= WAKE_MAGIC
;
3004 lan78xx_remove_irq_domain(dev
);
3007 netdev_warn(dev
->net
, "Bind routine FAILED");
3008 cancel_work_sync(&pdata
->set_multicast
);
3009 cancel_work_sync(&pdata
->set_vlan
);
3014 static void lan78xx_unbind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
3016 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
3018 lan78xx_remove_irq_domain(dev
);
3020 lan78xx_remove_mdio(dev
);
3023 cancel_work_sync(&pdata
->set_multicast
);
3024 cancel_work_sync(&pdata
->set_vlan
);
3025 netif_dbg(dev
, ifdown
, dev
->net
, "free pdata");
3032 static void lan78xx_rx_csum_offload(struct lan78xx_net
*dev
,
3033 struct sk_buff
*skb
,
3034 u32 rx_cmd_a
, u32 rx_cmd_b
)
3036 /* HW Checksum offload appears to be flawed if used when not stripping
3037 * VLAN headers. Drop back to S/W checksums under these conditions.
3039 if (!(dev
->net
->features
& NETIF_F_RXCSUM
) ||
3040 unlikely(rx_cmd_a
& RX_CMD_A_ICSM_
) ||
3041 ((rx_cmd_a
& RX_CMD_A_FVTG_
) &&
3042 !(dev
->net
->features
& NETIF_F_HW_VLAN_CTAG_RX
))) {
3043 skb
->ip_summed
= CHECKSUM_NONE
;
3045 skb
->csum
= ntohs((u16
)(rx_cmd_b
>> RX_CMD_B_CSUM_SHIFT_
));
3046 skb
->ip_summed
= CHECKSUM_COMPLETE
;
3050 static void lan78xx_rx_vlan_offload(struct lan78xx_net
*dev
,
3051 struct sk_buff
*skb
,
3052 u32 rx_cmd_a
, u32 rx_cmd_b
)
3054 if ((dev
->net
->features
& NETIF_F_HW_VLAN_CTAG_RX
) &&
3055 (rx_cmd_a
& RX_CMD_A_FVTG_
))
3056 __vlan_hwaccel_put_tag(skb
, htons(ETH_P_8021Q
),
3057 (rx_cmd_b
& 0xffff));
3060 static void lan78xx_skb_return(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
3064 if (test_bit(EVENT_RX_PAUSED
, &dev
->flags
)) {
3065 skb_queue_tail(&dev
->rxq_pause
, skb
);
3069 dev
->net
->stats
.rx_packets
++;
3070 dev
->net
->stats
.rx_bytes
+= skb
->len
;
3072 skb
->protocol
= eth_type_trans(skb
, dev
->net
);
3074 netif_dbg(dev
, rx_status
, dev
->net
, "< rx, len %zu, type 0x%x\n",
3075 skb
->len
+ sizeof(struct ethhdr
), skb
->protocol
);
3076 memset(skb
->cb
, 0, sizeof(struct skb_data
));
3078 if (skb_defer_rx_timestamp(skb
))
3081 status
= netif_rx(skb
);
3082 if (status
!= NET_RX_SUCCESS
)
3083 netif_dbg(dev
, rx_err
, dev
->net
,
3084 "netif_rx status %d\n", status
);
3087 static int lan78xx_rx(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
3089 if (skb
->len
< dev
->net
->hard_header_len
)
3092 while (skb
->len
> 0) {
3093 u32 rx_cmd_a
, rx_cmd_b
, align_count
, size
;
3095 struct sk_buff
*skb2
;
3096 unsigned char *packet
;
3098 rx_cmd_a
= get_unaligned_le32(skb
->data
);
3099 skb_pull(skb
, sizeof(rx_cmd_a
));
3101 rx_cmd_b
= get_unaligned_le32(skb
->data
);
3102 skb_pull(skb
, sizeof(rx_cmd_b
));
3104 rx_cmd_c
= get_unaligned_le16(skb
->data
);
3105 skb_pull(skb
, sizeof(rx_cmd_c
));
3109 /* get the packet length */
3110 size
= (rx_cmd_a
& RX_CMD_A_LEN_MASK_
);
3111 align_count
= (4 - ((size
+ RXW_PADDING
) % 4)) % 4;
3113 if (unlikely(rx_cmd_a
& RX_CMD_A_RED_
)) {
3114 netif_dbg(dev
, rx_err
, dev
->net
,
3115 "Error rx_cmd_a=0x%08x", rx_cmd_a
);
3117 /* last frame in this batch */
3118 if (skb
->len
== size
) {
3119 lan78xx_rx_csum_offload(dev
, skb
,
3120 rx_cmd_a
, rx_cmd_b
);
3121 lan78xx_rx_vlan_offload(dev
, skb
,
3122 rx_cmd_a
, rx_cmd_b
);
3124 skb_trim(skb
, skb
->len
- 4); /* remove fcs */
3125 skb
->truesize
= size
+ sizeof(struct sk_buff
);
3130 skb2
= skb_clone(skb
, GFP_ATOMIC
);
3131 if (unlikely(!skb2
)) {
3132 netdev_warn(dev
->net
, "Error allocating skb");
3137 skb2
->data
= packet
;
3138 skb_set_tail_pointer(skb2
, size
);
3140 lan78xx_rx_csum_offload(dev
, skb2
, rx_cmd_a
, rx_cmd_b
);
3141 lan78xx_rx_vlan_offload(dev
, skb2
, rx_cmd_a
, rx_cmd_b
);
3143 skb_trim(skb2
, skb2
->len
- 4); /* remove fcs */
3144 skb2
->truesize
= size
+ sizeof(struct sk_buff
);
3146 lan78xx_skb_return(dev
, skb2
);
3149 skb_pull(skb
, size
);
3151 /* padding bytes before the next frame starts */
3153 skb_pull(skb
, align_count
);
3159 static inline void rx_process(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
3161 if (!lan78xx_rx(dev
, skb
)) {
3162 dev
->net
->stats
.rx_errors
++;
3167 lan78xx_skb_return(dev
, skb
);
3171 netif_dbg(dev
, rx_err
, dev
->net
, "drop\n");
3172 dev
->net
->stats
.rx_errors
++;
3174 skb_queue_tail(&dev
->done
, skb
);
3177 static void rx_complete(struct urb
*urb
);
3179 static int rx_submit(struct lan78xx_net
*dev
, struct urb
*urb
, gfp_t flags
)
3181 struct sk_buff
*skb
;
3182 struct skb_data
*entry
;
3183 unsigned long lockflags
;
3184 size_t size
= dev
->rx_urb_size
;
3187 skb
= netdev_alloc_skb_ip_align(dev
->net
, size
);
3193 entry
= (struct skb_data
*)skb
->cb
;
3198 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_in
,
3199 skb
->data
, size
, rx_complete
, skb
);
3201 spin_lock_irqsave(&dev
->rxq
.lock
, lockflags
);
3203 if (netif_device_present(dev
->net
) &&
3204 netif_running(dev
->net
) &&
3205 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
3206 !test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3207 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
3210 lan78xx_queue_skb(&dev
->rxq
, skb
, rx_start
);
3213 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
3216 netif_dbg(dev
, ifdown
, dev
->net
, "device gone\n");
3217 netif_device_detach(dev
->net
);
3223 netif_dbg(dev
, rx_err
, dev
->net
,
3224 "rx submit, %d\n", ret
);
3225 tasklet_schedule(&dev
->bh
);
3228 netif_dbg(dev
, ifdown
, dev
->net
, "rx: stopped\n");
3231 spin_unlock_irqrestore(&dev
->rxq
.lock
, lockflags
);
3233 dev_kfree_skb_any(skb
);
3239 static void rx_complete(struct urb
*urb
)
3241 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
3242 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
3243 struct lan78xx_net
*dev
= entry
->dev
;
3244 int urb_status
= urb
->status
;
3245 enum skb_state state
;
3247 skb_put(skb
, urb
->actual_length
);
3251 switch (urb_status
) {
3253 if (skb
->len
< dev
->net
->hard_header_len
) {
3255 dev
->net
->stats
.rx_errors
++;
3256 dev
->net
->stats
.rx_length_errors
++;
3257 netif_dbg(dev
, rx_err
, dev
->net
,
3258 "rx length %d\n", skb
->len
);
3260 usb_mark_last_busy(dev
->udev
);
3263 dev
->net
->stats
.rx_errors
++;
3264 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
3266 case -ECONNRESET
: /* async unlink */
3267 case -ESHUTDOWN
: /* hardware gone */
3268 netif_dbg(dev
, ifdown
, dev
->net
,
3269 "rx shutdown, code %d\n", urb_status
);
3277 dev
->net
->stats
.rx_errors
++;
3283 /* data overrun ... flush fifo? */
3285 dev
->net
->stats
.rx_over_errors
++;
3290 dev
->net
->stats
.rx_errors
++;
3291 netif_dbg(dev
, rx_err
, dev
->net
, "rx status %d\n", urb_status
);
3295 state
= defer_bh(dev
, skb
, &dev
->rxq
, state
);
3298 if (netif_running(dev
->net
) &&
3299 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
3300 state
!= unlink_start
) {
3301 rx_submit(dev
, urb
, GFP_ATOMIC
);
3306 netif_dbg(dev
, rx_err
, dev
->net
, "no read resubmitted\n");
3309 static void lan78xx_tx_bh(struct lan78xx_net
*dev
)
3312 struct urb
*urb
= NULL
;
3313 struct skb_data
*entry
;
3314 unsigned long flags
;
3315 struct sk_buff_head
*tqp
= &dev
->txq_pend
;
3316 struct sk_buff
*skb
, *skb2
;
3319 int skb_totallen
, pkt_cnt
;
3325 spin_lock_irqsave(&tqp
->lock
, flags
);
3326 skb_queue_walk(tqp
, skb
) {
3327 if (skb_is_gso(skb
)) {
3328 if (!skb_queue_is_first(tqp
, skb
)) {
3329 /* handle previous packets first */
3333 length
= skb
->len
- TX_OVERHEAD
;
3334 __skb_unlink(skb
, tqp
);
3335 spin_unlock_irqrestore(&tqp
->lock
, flags
);
3339 if ((skb_totallen
+ skb
->len
) > MAX_SINGLE_PACKET_SIZE
)
3341 skb_totallen
= skb
->len
+ roundup(skb_totallen
, sizeof(u32
));
3344 spin_unlock_irqrestore(&tqp
->lock
, flags
);
3346 /* copy to a single skb */
3347 skb
= alloc_skb(skb_totallen
, GFP_ATOMIC
);
3351 skb_put(skb
, skb_totallen
);
3353 for (count
= pos
= 0; count
< pkt_cnt
; count
++) {
3354 skb2
= skb_dequeue(tqp
);
3356 length
+= (skb2
->len
- TX_OVERHEAD
);
3357 memcpy(skb
->data
+ pos
, skb2
->data
, skb2
->len
);
3358 pos
+= roundup(skb2
->len
, sizeof(u32
));
3359 dev_kfree_skb(skb2
);
3364 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
3368 entry
= (struct skb_data
*)skb
->cb
;
3371 entry
->length
= length
;
3372 entry
->num_of_packet
= count
;
3374 spin_lock_irqsave(&dev
->txq
.lock
, flags
);
3375 ret
= usb_autopm_get_interface_async(dev
->intf
);
3377 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
3381 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_out
,
3382 skb
->data
, skb
->len
, tx_complete
, skb
);
3384 if (length
% dev
->maxpacket
== 0) {
3385 /* send USB_ZERO_PACKET */
3386 urb
->transfer_flags
|= URB_ZERO_PACKET
;
3390 /* if this triggers the device is still a sleep */
3391 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3392 /* transmission will be done in resume */
3393 usb_anchor_urb(urb
, &dev
->deferred
);
3394 /* no use to process more packets */
3395 netif_stop_queue(dev
->net
);
3397 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
3398 netdev_dbg(dev
->net
, "Delaying transmission for resumption\n");
3403 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
3406 netif_trans_update(dev
->net
);
3407 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
3408 if (skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
)
3409 netif_stop_queue(dev
->net
);
3412 netif_stop_queue(dev
->net
);
3413 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
3414 usb_autopm_put_interface_async(dev
->intf
);
3417 usb_autopm_put_interface_async(dev
->intf
);
3418 netif_dbg(dev
, tx_err
, dev
->net
,
3419 "tx: submit urb err %d\n", ret
);
3423 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
3426 netif_dbg(dev
, tx_err
, dev
->net
, "drop, code %d\n", ret
);
3428 dev
->net
->stats
.tx_dropped
++;
3430 dev_kfree_skb_any(skb
);
3433 netif_dbg(dev
, tx_queued
, dev
->net
,
3434 "> tx, len %d, type 0x%x\n", length
, skb
->protocol
);
3437 static void lan78xx_rx_bh(struct lan78xx_net
*dev
)
3442 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
) {
3443 for (i
= 0; i
< 10; i
++) {
3444 if (skb_queue_len(&dev
->rxq
) >= dev
->rx_qlen
)
3446 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
3448 if (rx_submit(dev
, urb
, GFP_ATOMIC
) == -ENOLINK
)
3452 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
)
3453 tasklet_schedule(&dev
->bh
);
3455 if (skb_queue_len(&dev
->txq
) < dev
->tx_qlen
)
3456 netif_wake_queue(dev
->net
);
3459 static void lan78xx_bh(unsigned long param
)
3461 struct lan78xx_net
*dev
= (struct lan78xx_net
*)param
;
3462 struct sk_buff
*skb
;
3463 struct skb_data
*entry
;
3465 while ((skb
= skb_dequeue(&dev
->done
))) {
3466 entry
= (struct skb_data
*)(skb
->cb
);
3467 switch (entry
->state
) {
3469 entry
->state
= rx_cleanup
;
3470 rx_process(dev
, skb
);
3473 usb_free_urb(entry
->urb
);
3477 usb_free_urb(entry
->urb
);
3481 netdev_dbg(dev
->net
, "skb state %d\n", entry
->state
);
3486 if (netif_device_present(dev
->net
) && netif_running(dev
->net
)) {
3487 /* reset update timer delta */
3488 if (timer_pending(&dev
->stat_monitor
) && (dev
->delta
!= 1)) {
3490 mod_timer(&dev
->stat_monitor
,
3491 jiffies
+ STAT_UPDATE_TIMER
);
3494 if (!skb_queue_empty(&dev
->txq_pend
))
3497 if (!timer_pending(&dev
->delay
) &&
3498 !test_bit(EVENT_RX_HALT
, &dev
->flags
))
3503 static void lan78xx_delayedwork(struct work_struct
*work
)
3506 struct lan78xx_net
*dev
;
3508 dev
= container_of(work
, struct lan78xx_net
, wq
.work
);
3510 if (test_bit(EVENT_TX_HALT
, &dev
->flags
)) {
3511 unlink_urbs(dev
, &dev
->txq
);
3512 status
= usb_autopm_get_interface(dev
->intf
);
3515 status
= usb_clear_halt(dev
->udev
, dev
->pipe_out
);
3516 usb_autopm_put_interface(dev
->intf
);
3519 status
!= -ESHUTDOWN
) {
3520 if (netif_msg_tx_err(dev
))
3522 netdev_err(dev
->net
,
3523 "can't clear tx halt, status %d\n",
3526 clear_bit(EVENT_TX_HALT
, &dev
->flags
);
3527 if (status
!= -ESHUTDOWN
)
3528 netif_wake_queue(dev
->net
);
3531 if (test_bit(EVENT_RX_HALT
, &dev
->flags
)) {
3532 unlink_urbs(dev
, &dev
->rxq
);
3533 status
= usb_autopm_get_interface(dev
->intf
);
3536 status
= usb_clear_halt(dev
->udev
, dev
->pipe_in
);
3537 usb_autopm_put_interface(dev
->intf
);
3540 status
!= -ESHUTDOWN
) {
3541 if (netif_msg_rx_err(dev
))
3543 netdev_err(dev
->net
,
3544 "can't clear rx halt, status %d\n",
3547 clear_bit(EVENT_RX_HALT
, &dev
->flags
);
3548 tasklet_schedule(&dev
->bh
);
3552 if (test_bit(EVENT_LINK_RESET
, &dev
->flags
)) {
3555 clear_bit(EVENT_LINK_RESET
, &dev
->flags
);
3556 status
= usb_autopm_get_interface(dev
->intf
);
3559 if (lan78xx_link_reset(dev
) < 0) {
3560 usb_autopm_put_interface(dev
->intf
);
3562 netdev_info(dev
->net
, "link reset failed (%d)\n",
3565 usb_autopm_put_interface(dev
->intf
);
3569 if (test_bit(EVENT_STAT_UPDATE
, &dev
->flags
)) {
3570 lan78xx_update_stats(dev
);
3572 clear_bit(EVENT_STAT_UPDATE
, &dev
->flags
);
3574 mod_timer(&dev
->stat_monitor
,
3575 jiffies
+ (STAT_UPDATE_TIMER
* dev
->delta
));
3577 dev
->delta
= min((dev
->delta
* 2), 50);
3581 static void intr_complete(struct urb
*urb
)
3583 struct lan78xx_net
*dev
= urb
->context
;
3584 int status
= urb
->status
;
3589 lan78xx_status(dev
, urb
);
3592 /* software-driven interface shutdown */
3593 case -ENOENT
: /* urb killed */
3594 case -ESHUTDOWN
: /* hardware gone */
3595 netif_dbg(dev
, ifdown
, dev
->net
,
3596 "intr shutdown, code %d\n", status
);
3599 /* NOTE: not throttling like RX/TX, since this endpoint
3600 * already polls infrequently
3603 netdev_dbg(dev
->net
, "intr status %d\n", status
);
3607 if (!netif_running(dev
->net
))
3610 memset(urb
->transfer_buffer
, 0, urb
->transfer_buffer_length
);
3611 status
= usb_submit_urb(urb
, GFP_ATOMIC
);
3613 netif_err(dev
, timer
, dev
->net
,
3614 "intr resubmit --> %d\n", status
);
3617 static void lan78xx_disconnect(struct usb_interface
*intf
)
3619 struct lan78xx_net
*dev
;
3620 struct usb_device
*udev
;
3621 struct net_device
*net
;
3622 struct phy_device
*phydev
;
3624 dev
= usb_get_intfdata(intf
);
3625 usb_set_intfdata(intf
, NULL
);
3629 udev
= interface_to_usbdev(intf
);
3631 phydev
= net
->phydev
;
3633 phy_unregister_fixup_for_uid(PHY_KSZ9031RNX
, 0xfffffff0);
3634 phy_unregister_fixup_for_uid(PHY_LAN8835
, 0xfffffff0);
3636 phy_disconnect(net
->phydev
);
3638 if (phy_is_pseudo_fixed_link(phydev
))
3639 fixed_phy_unregister(phydev
);
3641 unregister_netdev(net
);
3643 cancel_delayed_work_sync(&dev
->wq
);
3645 usb_scuttle_anchored_urbs(&dev
->deferred
);
3647 lan78xx_unbind(dev
, intf
);
3649 usb_kill_urb(dev
->urb_intr
);
3650 usb_free_urb(dev
->urb_intr
);
3656 static void lan78xx_tx_timeout(struct net_device
*net
, unsigned int txqueue
)
3658 struct lan78xx_net
*dev
= netdev_priv(net
);
3660 unlink_urbs(dev
, &dev
->txq
);
3661 tasklet_schedule(&dev
->bh
);
3664 static netdev_features_t
lan78xx_features_check(struct sk_buff
*skb
,
3665 struct net_device
*netdev
,
3666 netdev_features_t features
)
3668 if (skb
->len
+ TX_OVERHEAD
> MAX_SINGLE_PACKET_SIZE
)
3669 features
&= ~NETIF_F_GSO_MASK
;
3671 features
= vlan_features_check(skb
, features
);
3672 features
= vxlan_features_check(skb
, features
);
3677 static const struct net_device_ops lan78xx_netdev_ops
= {
3678 .ndo_open
= lan78xx_open
,
3679 .ndo_stop
= lan78xx_stop
,
3680 .ndo_start_xmit
= lan78xx_start_xmit
,
3681 .ndo_tx_timeout
= lan78xx_tx_timeout
,
3682 .ndo_change_mtu
= lan78xx_change_mtu
,
3683 .ndo_set_mac_address
= lan78xx_set_mac_addr
,
3684 .ndo_validate_addr
= eth_validate_addr
,
3685 .ndo_do_ioctl
= phy_do_ioctl_running
,
3686 .ndo_set_rx_mode
= lan78xx_set_multicast
,
3687 .ndo_set_features
= lan78xx_set_features
,
3688 .ndo_vlan_rx_add_vid
= lan78xx_vlan_rx_add_vid
,
3689 .ndo_vlan_rx_kill_vid
= lan78xx_vlan_rx_kill_vid
,
3690 .ndo_features_check
= lan78xx_features_check
,
3693 static void lan78xx_stat_monitor(struct timer_list
*t
)
3695 struct lan78xx_net
*dev
= from_timer(dev
, t
, stat_monitor
);
3697 lan78xx_defer_kevent(dev
, EVENT_STAT_UPDATE
);
3700 static int lan78xx_probe(struct usb_interface
*intf
,
3701 const struct usb_device_id
*id
)
3703 struct lan78xx_net
*dev
;
3704 struct net_device
*netdev
;
3705 struct usb_device
*udev
;
3711 udev
= interface_to_usbdev(intf
);
3712 udev
= usb_get_dev(udev
);
3714 netdev
= alloc_etherdev(sizeof(struct lan78xx_net
));
3716 dev_err(&intf
->dev
, "Error: OOM\n");
3721 /* netdev_printk() needs this */
3722 SET_NETDEV_DEV(netdev
, &intf
->dev
);
3724 dev
= netdev_priv(netdev
);
3728 dev
->msg_enable
= netif_msg_init(msg_level
, NETIF_MSG_DRV
3729 | NETIF_MSG_PROBE
| NETIF_MSG_LINK
);
3731 skb_queue_head_init(&dev
->rxq
);
3732 skb_queue_head_init(&dev
->txq
);
3733 skb_queue_head_init(&dev
->done
);
3734 skb_queue_head_init(&dev
->rxq_pause
);
3735 skb_queue_head_init(&dev
->txq_pend
);
3736 mutex_init(&dev
->phy_mutex
);
3738 tasklet_init(&dev
->bh
, lan78xx_bh
, (unsigned long)dev
);
3739 INIT_DELAYED_WORK(&dev
->wq
, lan78xx_delayedwork
);
3740 init_usb_anchor(&dev
->deferred
);
3742 netdev
->netdev_ops
= &lan78xx_netdev_ops
;
3743 netdev
->watchdog_timeo
= TX_TIMEOUT_JIFFIES
;
3744 netdev
->ethtool_ops
= &lan78xx_ethtool_ops
;
3747 timer_setup(&dev
->stat_monitor
, lan78xx_stat_monitor
, 0);
3749 mutex_init(&dev
->stats
.access_lock
);
3751 ret
= lan78xx_bind(dev
, intf
);
3755 if (netdev
->mtu
> (dev
->hard_mtu
- netdev
->hard_header_len
))
3756 netdev
->mtu
= dev
->hard_mtu
- netdev
->hard_header_len
;
3758 /* MTU range: 68 - 9000 */
3759 netdev
->max_mtu
= MAX_SINGLE_PACKET_SIZE
;
3760 netif_set_gso_max_size(netdev
, MAX_SINGLE_PACKET_SIZE
- MAX_HEADER
);
3762 dev
->ep_blkin
= (intf
->cur_altsetting
)->endpoint
+ 0;
3763 dev
->ep_blkout
= (intf
->cur_altsetting
)->endpoint
+ 1;
3764 dev
->ep_intr
= (intf
->cur_altsetting
)->endpoint
+ 2;
3766 dev
->pipe_in
= usb_rcvbulkpipe(udev
, BULK_IN_PIPE
);
3767 dev
->pipe_out
= usb_sndbulkpipe(udev
, BULK_OUT_PIPE
);
3769 dev
->pipe_intr
= usb_rcvintpipe(dev
->udev
,
3770 dev
->ep_intr
->desc
.bEndpointAddress
&
3771 USB_ENDPOINT_NUMBER_MASK
);
3772 period
= dev
->ep_intr
->desc
.bInterval
;
3774 maxp
= usb_maxpacket(dev
->udev
, dev
->pipe_intr
, 0);
3775 buf
= kmalloc(maxp
, GFP_KERNEL
);
3777 dev
->urb_intr
= usb_alloc_urb(0, GFP_KERNEL
);
3778 if (!dev
->urb_intr
) {
3783 usb_fill_int_urb(dev
->urb_intr
, dev
->udev
,
3784 dev
->pipe_intr
, buf
, maxp
,
3785 intr_complete
, dev
, period
);
3789 dev
->maxpacket
= usb_maxpacket(dev
->udev
, dev
->pipe_out
, 1);
3791 /* driver requires remote-wakeup capability during autosuspend. */
3792 intf
->needs_remote_wakeup
= 1;
3794 ret
= lan78xx_phy_init(dev
);
3798 ret
= register_netdev(netdev
);
3800 netif_err(dev
, probe
, netdev
, "couldn't register the device\n");
3804 usb_set_intfdata(intf
, dev
);
3806 ret
= device_set_wakeup_enable(&udev
->dev
, true);
3808 /* Default delay of 2sec has more overhead than advantage.
3809 * Set to 10sec as default.
3811 pm_runtime_set_autosuspend_delay(&udev
->dev
,
3812 DEFAULT_AUTOSUSPEND_DELAY
);
3817 phy_disconnect(netdev
->phydev
);
3819 usb_free_urb(dev
->urb_intr
);
3821 lan78xx_unbind(dev
, intf
);
3823 free_netdev(netdev
);
3830 static u16
lan78xx_wakeframe_crc16(const u8
*buf
, int len
)
3832 const u16 crc16poly
= 0x8005;
3838 for (i
= 0; i
< len
; i
++) {
3840 for (bit
= 0; bit
< 8; bit
++) {
3844 if (msb
^ (u16
)(data
& 1)) {
3846 crc
|= (u16
)0x0001U
;
3855 static int lan78xx_set_suspend(struct lan78xx_net
*dev
, u32 wol
)
3863 const u8 ipv4_multicast
[3] = { 0x01, 0x00, 0x5E };
3864 const u8 ipv6_multicast
[3] = { 0x33, 0x33 };
3865 const u8 arp_type
[2] = { 0x08, 0x06 };
3867 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3868 buf
&= ~MAC_TX_TXEN_
;
3869 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3870 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3871 buf
&= ~MAC_RX_RXEN_
;
3872 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3874 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3875 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3876 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3881 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &temp_pmt_ctl
);
3882 temp_pmt_ctl
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3883 temp_pmt_ctl
|= PMT_CTL_RES_CLR_WKP_STS_
;
3885 for (mask_index
= 0; mask_index
< NUM_OF_WUF_CFG
; mask_index
++)
3886 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
), 0);
3889 if (wol
& WAKE_PHY
) {
3890 temp_pmt_ctl
|= PMT_CTL_PHY_WAKE_EN_
;
3892 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3893 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3894 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3896 if (wol
& WAKE_MAGIC
) {
3897 temp_wucsr
|= WUCSR_MPEN_
;
3899 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3900 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3901 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_3_
;
3903 if (wol
& WAKE_BCAST
) {
3904 temp_wucsr
|= WUCSR_BCST_EN_
;
3906 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3907 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3908 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3910 if (wol
& WAKE_MCAST
) {
3911 temp_wucsr
|= WUCSR_WAKE_EN_
;
3913 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3914 crc
= lan78xx_wakeframe_crc16(ipv4_multicast
, 3);
3915 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3917 WUF_CFGX_TYPE_MCAST_
|
3918 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3919 (crc
& WUF_CFGX_CRC16_MASK_
));
3921 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 7);
3922 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3923 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3924 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3927 /* for IPv6 Multicast */
3928 crc
= lan78xx_wakeframe_crc16(ipv6_multicast
, 2);
3929 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3931 WUF_CFGX_TYPE_MCAST_
|
3932 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3933 (crc
& WUF_CFGX_CRC16_MASK_
));
3935 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 3);
3936 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3937 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3938 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3941 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3942 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3943 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3945 if (wol
& WAKE_UCAST
) {
3946 temp_wucsr
|= WUCSR_PFDA_EN_
;
3948 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3949 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3950 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3952 if (wol
& WAKE_ARP
) {
3953 temp_wucsr
|= WUCSR_WAKE_EN_
;
3955 /* set WUF_CFG & WUF_MASK
3956 * for packettype (offset 12,13) = ARP (0x0806)
3958 crc
= lan78xx_wakeframe_crc16(arp_type
, 2);
3959 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3961 WUF_CFGX_TYPE_ALL_
|
3962 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3963 (crc
& WUF_CFGX_CRC16_MASK_
));
3965 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 0x3000);
3966 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3967 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3968 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3971 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3972 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3973 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3976 ret
= lan78xx_write_reg(dev
, WUCSR
, temp_wucsr
);
3978 /* when multiple WOL bits are set */
3979 if (hweight_long((unsigned long)wol
) > 1) {
3980 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3981 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3982 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3984 ret
= lan78xx_write_reg(dev
, PMT_CTL
, temp_pmt_ctl
);
3987 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3988 buf
|= PMT_CTL_WUPS_MASK_
;
3989 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3991 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3992 buf
|= MAC_RX_RXEN_
;
3993 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3998 static int lan78xx_suspend(struct usb_interface
*intf
, pm_message_t message
)
4000 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
4001 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
4005 if (!dev
->suspend_count
++) {
4006 spin_lock_irq(&dev
->txq
.lock
);
4007 /* don't autosuspend while transmitting */
4008 if ((skb_queue_len(&dev
->txq
) ||
4009 skb_queue_len(&dev
->txq_pend
)) &&
4010 PMSG_IS_AUTO(message
)) {
4011 spin_unlock_irq(&dev
->txq
.lock
);
4015 set_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
4016 spin_unlock_irq(&dev
->txq
.lock
);
4020 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
4021 buf
&= ~MAC_TX_TXEN_
;
4022 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
4023 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
4024 buf
&= ~MAC_RX_RXEN_
;
4025 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
4027 /* empty out the rx and queues */
4028 netif_device_detach(dev
->net
);
4029 lan78xx_terminate_urbs(dev
);
4030 usb_kill_urb(dev
->urb_intr
);
4033 netif_device_attach(dev
->net
);
4036 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
4037 del_timer(&dev
->stat_monitor
);
4039 if (PMSG_IS_AUTO(message
)) {
4040 /* auto suspend (selective suspend) */
4041 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
4042 buf
&= ~MAC_TX_TXEN_
;
4043 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
4044 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
4045 buf
&= ~MAC_RX_RXEN_
;
4046 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
4048 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
4049 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
4050 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
4052 /* set goodframe wakeup */
4053 ret
= lan78xx_read_reg(dev
, WUCSR
, &buf
);
4055 buf
|= WUCSR_RFE_WAKE_EN_
;
4056 buf
|= WUCSR_STORE_WAKE_
;
4058 ret
= lan78xx_write_reg(dev
, WUCSR
, buf
);
4060 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
4062 buf
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
4063 buf
|= PMT_CTL_RES_CLR_WKP_STS_
;
4065 buf
|= PMT_CTL_PHY_WAKE_EN_
;
4066 buf
|= PMT_CTL_WOL_EN_
;
4067 buf
&= ~PMT_CTL_SUS_MODE_MASK_
;
4068 buf
|= PMT_CTL_SUS_MODE_3_
;
4070 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
4072 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
4074 buf
|= PMT_CTL_WUPS_MASK_
;
4076 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
4078 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
4079 buf
|= MAC_RX_RXEN_
;
4080 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
4082 lan78xx_set_suspend(dev
, pdata
->wol
);
4091 static int lan78xx_resume(struct usb_interface
*intf
)
4093 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
4094 struct sk_buff
*skb
;
4099 if (!timer_pending(&dev
->stat_monitor
)) {
4101 mod_timer(&dev
->stat_monitor
,
4102 jiffies
+ STAT_UPDATE_TIMER
);
4105 if (!--dev
->suspend_count
) {
4106 /* resume interrupt URBs */
4107 if (dev
->urb_intr
&& test_bit(EVENT_DEV_OPEN
, &dev
->flags
))
4108 usb_submit_urb(dev
->urb_intr
, GFP_NOIO
);
4110 spin_lock_irq(&dev
->txq
.lock
);
4111 while ((res
= usb_get_from_anchor(&dev
->deferred
))) {
4112 skb
= (struct sk_buff
*)res
->context
;
4113 ret
= usb_submit_urb(res
, GFP_ATOMIC
);
4115 dev_kfree_skb_any(skb
);
4117 usb_autopm_put_interface_async(dev
->intf
);
4119 netif_trans_update(dev
->net
);
4120 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
4124 clear_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
4125 spin_unlock_irq(&dev
->txq
.lock
);
4127 if (test_bit(EVENT_DEV_OPEN
, &dev
->flags
)) {
4128 if (!(skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
))
4129 netif_start_queue(dev
->net
);
4130 tasklet_schedule(&dev
->bh
);
4134 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
4135 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
4136 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
4138 ret
= lan78xx_write_reg(dev
, WUCSR2
, WUCSR2_NS_RCD_
|
4140 WUCSR2_IPV6_TCPSYN_RCD_
|
4141 WUCSR2_IPV4_TCPSYN_RCD_
);
4143 ret
= lan78xx_write_reg(dev
, WUCSR
, WUCSR_EEE_TX_WAKE_
|
4144 WUCSR_EEE_RX_WAKE_
|
4146 WUCSR_RFE_WAKE_FR_
|
4151 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
4152 buf
|= MAC_TX_TXEN_
;
4153 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
4158 static int lan78xx_reset_resume(struct usb_interface
*intf
)
4160 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
4164 phy_start(dev
->net
->phydev
);
4166 return lan78xx_resume(intf
);
4169 static const struct usb_device_id products
[] = {
4171 /* LAN7800 USB Gigabit Ethernet Device */
4172 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7800_USB_PRODUCT_ID
),
4175 /* LAN7850 USB Gigabit Ethernet Device */
4176 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7850_USB_PRODUCT_ID
),
4179 /* LAN7801 USB Gigabit Ethernet Device */
4180 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7801_USB_PRODUCT_ID
),
4184 MODULE_DEVICE_TABLE(usb
, products
);
4186 static struct usb_driver lan78xx_driver
= {
4187 .name
= DRIVER_NAME
,
4188 .id_table
= products
,
4189 .probe
= lan78xx_probe
,
4190 .disconnect
= lan78xx_disconnect
,
4191 .suspend
= lan78xx_suspend
,
4192 .resume
= lan78xx_resume
,
4193 .reset_resume
= lan78xx_reset_resume
,
4194 .supports_autosuspend
= 1,
4195 .disable_hub_initiated_lpm
= 1,
4198 module_usb_driver(lan78xx_driver
);
4200 MODULE_AUTHOR(DRIVER_AUTHOR
);
4201 MODULE_DESCRIPTION(DRIVER_DESC
);
4202 MODULE_LICENSE("GPL");