/*
 * Copyright (C) 2015 Microchip Technology
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
36 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
37 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
38 #define DRIVER_NAME "lan78xx"
39 #define DRIVER_VERSION "1.0.2"
41 #define TX_TIMEOUT_JIFFIES (5 * HZ)
42 #define THROTTLE_JIFFIES (HZ / 8)
43 #define UNLINK_TIMEOUT_MS 3
45 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
47 #define SS_USB_PKT_SIZE (1024)
48 #define HS_USB_PKT_SIZE (512)
49 #define FS_USB_PKT_SIZE (64)
51 #define MAX_RX_FIFO_SIZE (12 * 1024)
52 #define MAX_TX_FIFO_SIZE (12 * 1024)
53 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
54 #define DEFAULT_BULK_IN_DELAY (0x0800)
55 #define MAX_SINGLE_PACKET_SIZE (9000)
56 #define DEFAULT_TX_CSUM_ENABLE (true)
57 #define DEFAULT_RX_CSUM_ENABLE (true)
58 #define DEFAULT_TSO_CSUM_ENABLE (true)
59 #define DEFAULT_VLAN_FILTER_ENABLE (true)
60 #define TX_OVERHEAD (8)
63 #define LAN78XX_USB_VENDOR_ID (0x0424)
64 #define LAN7800_USB_PRODUCT_ID (0x7800)
65 #define LAN7850_USB_PRODUCT_ID (0x7850)
66 #define LAN78XX_EEPROM_MAGIC (0x78A5)
67 #define LAN78XX_OTP_MAGIC (0x78F3)
72 #define EEPROM_INDICATOR (0xA5)
73 #define EEPROM_MAC_OFFSET (0x01)
74 #define MAX_EEPROM_SIZE 512
75 #define OTP_INDICATOR_1 (0xF3)
76 #define OTP_INDICATOR_2 (0xF7)
78 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
79 WAKE_MCAST | WAKE_BCAST | \
80 WAKE_ARP | WAKE_MAGIC)
82 /* USB related defines */
83 #define BULK_IN_PIPE 1
84 #define BULK_OUT_PIPE 2
86 /* default autosuspend delay (mSec)*/
87 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
89 static const char lan78xx_gstrings
[][ETH_GSTRING_LEN
] = {
91 "RX Alignment Errors",
94 "RX Undersize Frame Errors",
95 "RX Oversize Frame Errors",
97 "RX Unicast Byte Count",
98 "RX Broadcast Byte Count",
99 "RX Multicast Byte Count",
101 "RX Broadcast Frames",
102 "RX Multicast Frames",
105 "RX 65 - 127 Byte Frames",
106 "RX 128 - 255 Byte Frames",
107 "RX 256 - 511 Bytes Frames",
108 "RX 512 - 1023 Byte Frames",
109 "RX 1024 - 1518 Byte Frames",
110 "RX Greater 1518 Byte Frames",
111 "EEE RX LPI Transitions",
114 "TX Excess Deferral Errors",
117 "TX Single Collisions",
118 "TX Multiple Collisions",
119 "TX Excessive Collision",
120 "TX Late Collisions",
121 "TX Unicast Byte Count",
122 "TX Broadcast Byte Count",
123 "TX Multicast Byte Count",
125 "TX Broadcast Frames",
126 "TX Multicast Frames",
129 "TX 65 - 127 Byte Frames",
130 "TX 128 - 255 Byte Frames",
131 "TX 256 - 511 Bytes Frames",
132 "TX 512 - 1023 Byte Frames",
133 "TX 1024 - 1518 Byte Frames",
134 "TX Greater 1518 Byte Frames",
135 "EEE TX LPI Transitions",
139 struct lan78xx_statstage
{
141 u32 rx_alignment_errors
;
142 u32 rx_fragment_errors
;
143 u32 rx_jabber_errors
;
144 u32 rx_undersize_frame_errors
;
145 u32 rx_oversize_frame_errors
;
146 u32 rx_dropped_frames
;
147 u32 rx_unicast_byte_count
;
148 u32 rx_broadcast_byte_count
;
149 u32 rx_multicast_byte_count
;
150 u32 rx_unicast_frames
;
151 u32 rx_broadcast_frames
;
152 u32 rx_multicast_frames
;
154 u32 rx_64_byte_frames
;
155 u32 rx_65_127_byte_frames
;
156 u32 rx_128_255_byte_frames
;
157 u32 rx_256_511_bytes_frames
;
158 u32 rx_512_1023_byte_frames
;
159 u32 rx_1024_1518_byte_frames
;
160 u32 rx_greater_1518_byte_frames
;
161 u32 eee_rx_lpi_transitions
;
164 u32 tx_excess_deferral_errors
;
165 u32 tx_carrier_errors
;
166 u32 tx_bad_byte_count
;
167 u32 tx_single_collisions
;
168 u32 tx_multiple_collisions
;
169 u32 tx_excessive_collision
;
170 u32 tx_late_collisions
;
171 u32 tx_unicast_byte_count
;
172 u32 tx_broadcast_byte_count
;
173 u32 tx_multicast_byte_count
;
174 u32 tx_unicast_frames
;
175 u32 tx_broadcast_frames
;
176 u32 tx_multicast_frames
;
178 u32 tx_64_byte_frames
;
179 u32 tx_65_127_byte_frames
;
180 u32 tx_128_255_byte_frames
;
181 u32 tx_256_511_bytes_frames
;
182 u32 tx_512_1023_byte_frames
;
183 u32 tx_1024_1518_byte_frames
;
184 u32 tx_greater_1518_byte_frames
;
185 u32 eee_tx_lpi_transitions
;
191 struct lan78xx_priv
{
192 struct lan78xx_net
*dev
;
194 u32 mchash_table
[DP_SEL_VHF_HASH_LEN
]; /* multicat hash table */
195 u32 pfilter_table
[NUM_OF_MAF
][2]; /* perfect filter table */
196 u32 vlan_table
[DP_SEL_VHF_VLAN_LEN
];
197 struct mutex dataport_mutex
; /* for dataport access */
198 spinlock_t rfe_ctl_lock
; /* for rfe register access */
199 struct work_struct set_multicast
;
200 struct work_struct set_vlan
;
214 struct skb_data
{ /* skb->cb is one of these */
216 struct lan78xx_net
*dev
;
217 enum skb_state state
;
222 struct usb_ctrlrequest req
;
223 struct lan78xx_net
*dev
;
226 #define EVENT_TX_HALT 0
227 #define EVENT_RX_HALT 1
228 #define EVENT_RX_MEMORY 2
229 #define EVENT_STS_SPLIT 3
230 #define EVENT_LINK_RESET 4
231 #define EVENT_RX_PAUSED 5
232 #define EVENT_DEV_WAKING 6
233 #define EVENT_DEV_ASLEEP 7
234 #define EVENT_DEV_OPEN 8
237 struct net_device
*net
;
238 struct usb_device
*udev
;
239 struct usb_interface
*intf
;
244 struct sk_buff_head rxq
;
245 struct sk_buff_head txq
;
246 struct sk_buff_head done
;
247 struct sk_buff_head rxq_pause
;
248 struct sk_buff_head txq_pend
;
250 struct tasklet_struct bh
;
251 struct delayed_work wq
;
253 struct usb_host_endpoint
*ep_blkin
;
254 struct usb_host_endpoint
*ep_blkout
;
255 struct usb_host_endpoint
*ep_intr
;
259 struct urb
*urb_intr
;
260 struct usb_anchor deferred
;
262 struct mutex phy_mutex
; /* for phy access */
263 unsigned pipe_in
, pipe_out
, pipe_intr
;
265 u32 hard_mtu
; /* count any extra framing */
266 size_t rx_urb_size
; /* size for rx urbs */
270 wait_queue_head_t
*wait
;
271 unsigned char suspend_count
;
274 struct timer_list delay
;
276 unsigned long data
[5];
282 struct mii_bus
*mdiobus
;
285 /* use ethtool to change the level for any given device */
286 static int msg_level
= -1;
287 module_param(msg_level
, int, 0);
288 MODULE_PARM_DESC(msg_level
, "Override default message level");
290 static int lan78xx_read_reg(struct lan78xx_net
*dev
, u32 index
, u32
*data
)
292 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
298 ret
= usb_control_msg(dev
->udev
, usb_rcvctrlpipe(dev
->udev
, 0),
299 USB_VENDOR_REQUEST_READ_REGISTER
,
300 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
301 0, index
, buf
, 4, USB_CTRL_GET_TIMEOUT
);
302 if (likely(ret
>= 0)) {
306 netdev_warn(dev
->net
,
307 "Failed to read register index 0x%08x. ret = %d",
316 static int lan78xx_write_reg(struct lan78xx_net
*dev
, u32 index
, u32 data
)
318 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
327 ret
= usb_control_msg(dev
->udev
, usb_sndctrlpipe(dev
->udev
, 0),
328 USB_VENDOR_REQUEST_WRITE_REGISTER
,
329 USB_DIR_OUT
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
330 0, index
, buf
, 4, USB_CTRL_SET_TIMEOUT
);
331 if (unlikely(ret
< 0)) {
332 netdev_warn(dev
->net
,
333 "Failed to write register index 0x%08x. ret = %d",
342 static int lan78xx_read_stats(struct lan78xx_net
*dev
,
343 struct lan78xx_statstage
*data
)
347 struct lan78xx_statstage
*stats
;
351 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
355 ret
= usb_control_msg(dev
->udev
,
356 usb_rcvctrlpipe(dev
->udev
, 0),
357 USB_VENDOR_REQUEST_GET_STATS
,
358 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
363 USB_CTRL_SET_TIMEOUT
);
364 if (likely(ret
>= 0)) {
367 for (i
= 0; i
< sizeof(*stats
)/sizeof(u32
); i
++) {
368 le32_to_cpus(&src
[i
]);
372 netdev_warn(dev
->net
,
373 "Failed to read stat ret = 0x%x", ret
);
381 /* Loop until the read is completed with timeout called with phy_mutex held */
382 static int lan78xx_phy_wait_not_busy(struct lan78xx_net
*dev
)
384 unsigned long start_time
= jiffies
;
389 ret
= lan78xx_read_reg(dev
, MII_ACC
, &val
);
390 if (unlikely(ret
< 0))
393 if (!(val
& MII_ACC_MII_BUSY_
))
395 } while (!time_after(jiffies
, start_time
+ HZ
));
400 static inline u32
mii_access(int id
, int index
, int read
)
404 ret
= ((u32
)id
<< MII_ACC_PHY_ADDR_SHIFT_
) & MII_ACC_PHY_ADDR_MASK_
;
405 ret
|= ((u32
)index
<< MII_ACC_MIIRINDA_SHIFT_
) & MII_ACC_MIIRINDA_MASK_
;
407 ret
|= MII_ACC_MII_READ_
;
409 ret
|= MII_ACC_MII_WRITE_
;
410 ret
|= MII_ACC_MII_BUSY_
;
415 static int lan78xx_wait_eeprom(struct lan78xx_net
*dev
)
417 unsigned long start_time
= jiffies
;
422 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
423 if (unlikely(ret
< 0))
426 if (!(val
& E2P_CMD_EPC_BUSY_
) ||
427 (val
& E2P_CMD_EPC_TIMEOUT_
))
429 usleep_range(40, 100);
430 } while (!time_after(jiffies
, start_time
+ HZ
));
432 if (val
& (E2P_CMD_EPC_TIMEOUT_
| E2P_CMD_EPC_BUSY_
)) {
433 netdev_warn(dev
->net
, "EEPROM read operation timeout");
440 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net
*dev
)
442 unsigned long start_time
= jiffies
;
447 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
448 if (unlikely(ret
< 0))
451 if (!(val
& E2P_CMD_EPC_BUSY_
))
454 usleep_range(40, 100);
455 } while (!time_after(jiffies
, start_time
+ HZ
));
457 netdev_warn(dev
->net
, "EEPROM is busy");
461 static int lan78xx_read_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
462 u32 length
, u8
*data
)
469 /* depends on chip, some EEPROM pins are muxed with LED function.
470 * disable & restore LED function to access EEPROM.
472 ret
= lan78xx_read_reg(dev
, HW_CFG
, &val
);
474 if ((dev
->devid
& ID_REV_CHIP_ID_MASK_
) == 0x78000000) {
475 val
&= ~(HW_CFG_LED1_EN_
| HW_CFG_LED0_EN_
);
476 ret
= lan78xx_write_reg(dev
, HW_CFG
, val
);
479 retval
= lan78xx_eeprom_confirm_not_busy(dev
);
483 for (i
= 0; i
< length
; i
++) {
484 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_READ_
;
485 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
486 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
487 if (unlikely(ret
< 0)) {
492 retval
= lan78xx_wait_eeprom(dev
);
496 ret
= lan78xx_read_reg(dev
, E2P_DATA
, &val
);
497 if (unlikely(ret
< 0)) {
502 data
[i
] = val
& 0xFF;
508 if ((dev
->devid
& ID_REV_CHIP_ID_MASK_
) == 0x78000000)
509 ret
= lan78xx_write_reg(dev
, HW_CFG
, saved
);
514 static int lan78xx_read_eeprom(struct lan78xx_net
*dev
, u32 offset
,
515 u32 length
, u8
*data
)
520 ret
= lan78xx_read_raw_eeprom(dev
, 0, 1, &sig
);
521 if ((ret
== 0) && (sig
== EEPROM_INDICATOR
))
522 ret
= lan78xx_read_raw_eeprom(dev
, offset
, length
, data
);
529 static int lan78xx_write_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
530 u32 length
, u8
*data
)
537 /* depends on chip, some EEPROM pins are muxed with LED function.
538 * disable & restore LED function to access EEPROM.
540 ret
= lan78xx_read_reg(dev
, HW_CFG
, &val
);
542 if ((dev
->devid
& ID_REV_CHIP_ID_MASK_
) == 0x78000000) {
543 val
&= ~(HW_CFG_LED1_EN_
| HW_CFG_LED0_EN_
);
544 ret
= lan78xx_write_reg(dev
, HW_CFG
, val
);
547 retval
= lan78xx_eeprom_confirm_not_busy(dev
);
551 /* Issue write/erase enable command */
552 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_EWEN_
;
553 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
554 if (unlikely(ret
< 0)) {
559 retval
= lan78xx_wait_eeprom(dev
);
563 for (i
= 0; i
< length
; i
++) {
564 /* Fill data register */
566 ret
= lan78xx_write_reg(dev
, E2P_DATA
, val
);
572 /* Send "write" command */
573 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_WRITE_
;
574 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
575 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
581 retval
= lan78xx_wait_eeprom(dev
);
590 if ((dev
->devid
& ID_REV_CHIP_ID_MASK_
) == 0x78000000)
591 ret
= lan78xx_write_reg(dev
, HW_CFG
, saved
);
596 static int lan78xx_read_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
597 u32 length
, u8
*data
)
602 unsigned long timeout
;
604 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
606 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
607 /* clear it and wait to be cleared */
608 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
610 timeout
= jiffies
+ HZ
;
613 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
614 if (time_after(jiffies
, timeout
)) {
615 netdev_warn(dev
->net
,
616 "timeout on OTP_PWR_DN");
619 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
622 for (i
= 0; i
< length
; i
++) {
623 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
624 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
625 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
626 ((offset
+ i
) & OTP_ADDR2_10_3
));
628 ret
= lan78xx_write_reg(dev
, OTP_FUNC_CMD
, OTP_FUNC_CMD_READ_
);
629 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
631 timeout
= jiffies
+ HZ
;
634 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
635 if (time_after(jiffies
, timeout
)) {
636 netdev_warn(dev
->net
,
637 "timeout on OTP_STATUS");
640 } while (buf
& OTP_STATUS_BUSY_
);
642 ret
= lan78xx_read_reg(dev
, OTP_RD_DATA
, &buf
);
644 data
[i
] = (u8
)(buf
& 0xFF);
650 static int lan78xx_write_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
651 u32 length
, u8
*data
)
656 unsigned long timeout
;
658 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
660 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
661 /* clear it and wait to be cleared */
662 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
664 timeout
= jiffies
+ HZ
;
667 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
668 if (time_after(jiffies
, timeout
)) {
669 netdev_warn(dev
->net
,
670 "timeout on OTP_PWR_DN completion");
673 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
676 /* set to BYTE program mode */
677 ret
= lan78xx_write_reg(dev
, OTP_PRGM_MODE
, OTP_PRGM_MODE_BYTE_
);
679 for (i
= 0; i
< length
; i
++) {
680 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
681 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
682 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
683 ((offset
+ i
) & OTP_ADDR2_10_3
));
684 ret
= lan78xx_write_reg(dev
, OTP_PRGM_DATA
, data
[i
]);
685 ret
= lan78xx_write_reg(dev
, OTP_TST_CMD
, OTP_TST_CMD_PRGVRFY_
);
686 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
688 timeout
= jiffies
+ HZ
;
691 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
692 if (time_after(jiffies
, timeout
)) {
693 netdev_warn(dev
->net
,
694 "Timeout on OTP_STATUS completion");
697 } while (buf
& OTP_STATUS_BUSY_
);
703 static int lan78xx_read_otp(struct lan78xx_net
*dev
, u32 offset
,
704 u32 length
, u8
*data
)
709 ret
= lan78xx_read_raw_otp(dev
, 0, 1, &sig
);
712 if (sig
== OTP_INDICATOR_1
)
714 else if (sig
== OTP_INDICATOR_2
)
718 ret
= lan78xx_read_raw_otp(dev
, offset
, length
, data
);
724 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net
*dev
)
728 for (i
= 0; i
< 100; i
++) {
731 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
732 if (unlikely(ret
< 0))
735 if (dp_sel
& DP_SEL_DPRDY_
)
738 usleep_range(40, 100);
741 netdev_warn(dev
->net
, "lan78xx_dataport_wait_not_busy timed out");
746 static int lan78xx_dataport_write(struct lan78xx_net
*dev
, u32 ram_select
,
747 u32 addr
, u32 length
, u32
*buf
)
749 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
753 if (usb_autopm_get_interface(dev
->intf
) < 0)
756 mutex_lock(&pdata
->dataport_mutex
);
758 ret
= lan78xx_dataport_wait_not_busy(dev
);
762 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
764 dp_sel
&= ~DP_SEL_RSEL_MASK_
;
765 dp_sel
|= ram_select
;
766 ret
= lan78xx_write_reg(dev
, DP_SEL
, dp_sel
);
768 for (i
= 0; i
< length
; i
++) {
769 ret
= lan78xx_write_reg(dev
, DP_ADDR
, addr
+ i
);
771 ret
= lan78xx_write_reg(dev
, DP_DATA
, buf
[i
]);
773 ret
= lan78xx_write_reg(dev
, DP_CMD
, DP_CMD_WRITE_
);
775 ret
= lan78xx_dataport_wait_not_busy(dev
);
781 mutex_unlock(&pdata
->dataport_mutex
);
782 usb_autopm_put_interface(dev
->intf
);
787 static void lan78xx_set_addr_filter(struct lan78xx_priv
*pdata
,
788 int index
, u8 addr
[ETH_ALEN
])
792 if ((pdata
) && (index
> 0) && (index
< NUM_OF_MAF
)) {
794 temp
= addr
[2] | (temp
<< 8);
795 temp
= addr
[1] | (temp
<< 8);
796 temp
= addr
[0] | (temp
<< 8);
797 pdata
->pfilter_table
[index
][1] = temp
;
799 temp
= addr
[4] | (temp
<< 8);
800 temp
|= MAF_HI_VALID_
| MAF_HI_TYPE_DST_
;
801 pdata
->pfilter_table
[index
][0] = temp
;
805 /* returns hash bit number for given MAC address */
806 static inline u32
lan78xx_hash(char addr
[ETH_ALEN
])
808 return (ether_crc(ETH_ALEN
, addr
) >> 23) & 0x1ff;
811 static void lan78xx_deferred_multicast_write(struct work_struct
*param
)
813 struct lan78xx_priv
*pdata
=
814 container_of(param
, struct lan78xx_priv
, set_multicast
);
815 struct lan78xx_net
*dev
= pdata
->dev
;
819 netif_dbg(dev
, drv
, dev
->net
, "deferred multicast write 0x%08x\n",
822 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, DP_SEL_VHF_VLAN_LEN
,
823 DP_SEL_VHF_HASH_LEN
, pdata
->mchash_table
);
825 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
826 ret
= lan78xx_write_reg(dev
, MAF_HI(i
), 0);
827 ret
= lan78xx_write_reg(dev
, MAF_LO(i
),
828 pdata
->pfilter_table
[i
][1]);
829 ret
= lan78xx_write_reg(dev
, MAF_HI(i
),
830 pdata
->pfilter_table
[i
][0]);
833 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
836 static void lan78xx_set_multicast(struct net_device
*netdev
)
838 struct lan78xx_net
*dev
= netdev_priv(netdev
);
839 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
843 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
845 pdata
->rfe_ctl
&= ~(RFE_CTL_UCAST_EN_
| RFE_CTL_MCAST_EN_
|
846 RFE_CTL_DA_PERFECT_
| RFE_CTL_MCAST_HASH_
);
848 for (i
= 0; i
< DP_SEL_VHF_HASH_LEN
; i
++)
849 pdata
->mchash_table
[i
] = 0;
850 /* pfilter_table[0] has own HW address */
851 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
852 pdata
->pfilter_table
[i
][0] =
853 pdata
->pfilter_table
[i
][1] = 0;
856 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
;
858 if (dev
->net
->flags
& IFF_PROMISC
) {
859 netif_dbg(dev
, drv
, dev
->net
, "promiscuous mode enabled");
860 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
| RFE_CTL_UCAST_EN_
;
862 if (dev
->net
->flags
& IFF_ALLMULTI
) {
863 netif_dbg(dev
, drv
, dev
->net
,
864 "receive all multicast enabled");
865 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
;
869 if (netdev_mc_count(dev
->net
)) {
870 struct netdev_hw_addr
*ha
;
873 netif_dbg(dev
, drv
, dev
->net
, "receive multicast hash filter");
875 pdata
->rfe_ctl
|= RFE_CTL_DA_PERFECT_
;
878 netdev_for_each_mc_addr(ha
, netdev
) {
879 /* set first 32 into Perfect Filter */
881 lan78xx_set_addr_filter(pdata
, i
, ha
->addr
);
883 u32 bitnum
= lan78xx_hash(ha
->addr
);
885 pdata
->mchash_table
[bitnum
/ 32] |=
886 (1 << (bitnum
% 32));
887 pdata
->rfe_ctl
|= RFE_CTL_MCAST_HASH_
;
893 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
895 /* defer register writes to a sleepable context */
896 schedule_work(&pdata
->set_multicast
);
899 static int lan78xx_update_flowcontrol(struct lan78xx_net
*dev
, u8 duplex
,
900 u16 lcladv
, u16 rmtadv
)
902 u32 flow
= 0, fct_flow
= 0;
905 u8 cap
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
907 if (cap
& FLOW_CTRL_TX
)
908 flow
= (FLOW_CR_TX_FCEN_
| 0xFFFF);
910 if (cap
& FLOW_CTRL_RX
)
911 flow
|= FLOW_CR_RX_FCEN_
;
913 if (dev
->udev
->speed
== USB_SPEED_SUPER
)
915 else if (dev
->udev
->speed
== USB_SPEED_HIGH
)
918 netif_dbg(dev
, link
, dev
->net
, "rx pause %s, tx pause %s",
919 (cap
& FLOW_CTRL_RX
? "enabled" : "disabled"),
920 (cap
& FLOW_CTRL_TX
? "enabled" : "disabled"));
922 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, fct_flow
);
924 /* threshold value should be set before enabling flow */
925 ret
= lan78xx_write_reg(dev
, FLOW
, flow
);
930 static int lan78xx_link_reset(struct lan78xx_net
*dev
)
932 struct phy_device
*phydev
= dev
->net
->phydev
;
933 struct ethtool_cmd ecmd
= { .cmd
= ETHTOOL_GSET
};
937 /* clear PHY interrupt status */
938 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
939 if (unlikely(ret
< 0))
942 /* clear LAN78xx interrupt status */
943 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_PHY_INT_
);
944 if (unlikely(ret
< 0))
947 phy_read_status(phydev
);
949 if (!phydev
->link
&& dev
->link_on
) {
950 dev
->link_on
= false;
953 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
954 if (unlikely(ret
< 0))
957 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
958 if (unlikely(ret
< 0))
961 phy_mac_interrupt(phydev
, 0);
962 } else if (phydev
->link
&& !dev
->link_on
) {
965 phy_ethtool_gset(phydev
, &ecmd
);
967 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
969 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
970 if (ethtool_cmd_speed(&ecmd
) == 1000) {
972 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
973 buf
&= ~USB_CFG1_DEV_U2_INIT_EN_
;
974 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
976 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
977 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
978 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
981 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
982 buf
|= USB_CFG1_DEV_U2_INIT_EN_
;
983 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
984 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
988 ladv
= phy_read(phydev
, MII_ADVERTISE
);
992 radv
= phy_read(phydev
, MII_LPA
);
996 netif_dbg(dev
, link
, dev
->net
,
997 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
998 ethtool_cmd_speed(&ecmd
), ecmd
.duplex
, ladv
, radv
);
1000 ret
= lan78xx_update_flowcontrol(dev
, ecmd
.duplex
, ladv
, radv
);
1001 phy_mac_interrupt(phydev
, 1);
1007 /* some work can't be done in tasklets, so we use keventd
1009 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1010 * but tasklet_schedule() doesn't. hope the failure is rare.
1012 void lan78xx_defer_kevent(struct lan78xx_net
*dev
, int work
)
1014 set_bit(work
, &dev
->flags
);
1015 if (!schedule_delayed_work(&dev
->wq
, 0))
1016 netdev_err(dev
->net
, "kevent %d may have been dropped\n", work
);
1019 static void lan78xx_status(struct lan78xx_net
*dev
, struct urb
*urb
)
1023 if (urb
->actual_length
!= 4) {
1024 netdev_warn(dev
->net
,
1025 "unexpected urb length %d", urb
->actual_length
);
1029 memcpy(&intdata
, urb
->transfer_buffer
, 4);
1030 le32_to_cpus(&intdata
);
1032 if (intdata
& INT_ENP_PHY_INT
) {
1033 netif_dbg(dev
, link
, dev
->net
, "PHY INTR: 0x%08x\n", intdata
);
1034 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
1036 netdev_warn(dev
->net
,
1037 "unexpected interrupt: 0x%08x\n", intdata
);
1040 static int lan78xx_ethtool_get_eeprom_len(struct net_device
*netdev
)
1042 return MAX_EEPROM_SIZE
;
1045 static int lan78xx_ethtool_get_eeprom(struct net_device
*netdev
,
1046 struct ethtool_eeprom
*ee
, u8
*data
)
1048 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1050 ee
->magic
= LAN78XX_EEPROM_MAGIC
;
1052 return lan78xx_read_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1055 static int lan78xx_ethtool_set_eeprom(struct net_device
*netdev
,
1056 struct ethtool_eeprom
*ee
, u8
*data
)
1058 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1060 /* Allow entire eeprom update only */
1061 if ((ee
->magic
== LAN78XX_EEPROM_MAGIC
) &&
1062 (ee
->offset
== 0) &&
1064 (data
[0] == EEPROM_INDICATOR
))
1065 return lan78xx_write_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1066 else if ((ee
->magic
== LAN78XX_OTP_MAGIC
) &&
1067 (ee
->offset
== 0) &&
1069 (data
[0] == OTP_INDICATOR_1
))
1070 return lan78xx_write_raw_otp(dev
, ee
->offset
, ee
->len
, data
);
1075 static void lan78xx_get_strings(struct net_device
*netdev
, u32 stringset
,
1078 if (stringset
== ETH_SS_STATS
)
1079 memcpy(data
, lan78xx_gstrings
, sizeof(lan78xx_gstrings
));
1082 static int lan78xx_get_sset_count(struct net_device
*netdev
, int sset
)
1084 if (sset
== ETH_SS_STATS
)
1085 return ARRAY_SIZE(lan78xx_gstrings
);
1090 static void lan78xx_get_stats(struct net_device
*netdev
,
1091 struct ethtool_stats
*stats
, u64
*data
)
1093 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1094 struct lan78xx_statstage lan78xx_stat
;
1098 if (usb_autopm_get_interface(dev
->intf
) < 0)
1101 if (lan78xx_read_stats(dev
, &lan78xx_stat
) > 0) {
1102 p
= (u32
*)&lan78xx_stat
;
1103 for (i
= 0; i
< (sizeof(lan78xx_stat
) / (sizeof(u32
))); i
++)
1107 usb_autopm_put_interface(dev
->intf
);
1110 static void lan78xx_get_wol(struct net_device
*netdev
,
1111 struct ethtool_wolinfo
*wol
)
1113 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1116 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1118 if (usb_autopm_get_interface(dev
->intf
) < 0)
1121 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1122 if (unlikely(ret
< 0)) {
1126 if (buf
& USB_CFG_RMT_WKP_
) {
1127 wol
->supported
= WAKE_ALL
;
1128 wol
->wolopts
= pdata
->wol
;
1135 usb_autopm_put_interface(dev
->intf
);
1138 static int lan78xx_set_wol(struct net_device
*netdev
,
1139 struct ethtool_wolinfo
*wol
)
1141 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1142 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1145 ret
= usb_autopm_get_interface(dev
->intf
);
1150 if (wol
->wolopts
& WAKE_UCAST
)
1151 pdata
->wol
|= WAKE_UCAST
;
1152 if (wol
->wolopts
& WAKE_MCAST
)
1153 pdata
->wol
|= WAKE_MCAST
;
1154 if (wol
->wolopts
& WAKE_BCAST
)
1155 pdata
->wol
|= WAKE_BCAST
;
1156 if (wol
->wolopts
& WAKE_MAGIC
)
1157 pdata
->wol
|= WAKE_MAGIC
;
1158 if (wol
->wolopts
& WAKE_PHY
)
1159 pdata
->wol
|= WAKE_PHY
;
1160 if (wol
->wolopts
& WAKE_ARP
)
1161 pdata
->wol
|= WAKE_ARP
;
1163 device_set_wakeup_enable(&dev
->udev
->dev
, (bool)wol
->wolopts
);
1165 phy_ethtool_set_wol(netdev
->phydev
, wol
);
1167 usb_autopm_put_interface(dev
->intf
);
1172 static int lan78xx_get_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1174 struct lan78xx_net
*dev
= netdev_priv(net
);
1175 struct phy_device
*phydev
= net
->phydev
;
1179 ret
= usb_autopm_get_interface(dev
->intf
);
1183 ret
= phy_ethtool_get_eee(phydev
, edata
);
1187 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1188 if (buf
& MAC_CR_EEE_EN_
) {
1189 edata
->eee_enabled
= true;
1190 edata
->eee_active
= !!(edata
->advertised
&
1191 edata
->lp_advertised
);
1192 edata
->tx_lpi_enabled
= true;
1193 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1194 ret
= lan78xx_read_reg(dev
, EEE_TX_LPI_REQ_DLY
, &buf
);
1195 edata
->tx_lpi_timer
= buf
;
1197 edata
->eee_enabled
= false;
1198 edata
->eee_active
= false;
1199 edata
->tx_lpi_enabled
= false;
1200 edata
->tx_lpi_timer
= 0;
1205 usb_autopm_put_interface(dev
->intf
);
1210 static int lan78xx_set_eee(struct net_device
*net
, struct ethtool_eee
*edata
)
1212 struct lan78xx_net
*dev
= netdev_priv(net
);
1216 ret
= usb_autopm_get_interface(dev
->intf
);
1220 if (edata
->eee_enabled
) {
1221 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1222 buf
|= MAC_CR_EEE_EN_
;
1223 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1225 phy_ethtool_set_eee(net
->phydev
, edata
);
1227 buf
= (u32
)edata
->tx_lpi_timer
;
1228 ret
= lan78xx_write_reg(dev
, EEE_TX_LPI_REQ_DLY
, buf
);
1230 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1231 buf
&= ~MAC_CR_EEE_EN_
;
1232 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1235 usb_autopm_put_interface(dev
->intf
);
1240 static u32
lan78xx_get_link(struct net_device
*net
)
1242 phy_read_status(net
->phydev
);
1244 return net
->phydev
->link
;
1247 int lan78xx_nway_reset(struct net_device
*net
)
1249 return phy_start_aneg(net
->phydev
);
1252 static void lan78xx_get_drvinfo(struct net_device
*net
,
1253 struct ethtool_drvinfo
*info
)
1255 struct lan78xx_net
*dev
= netdev_priv(net
);
1257 strncpy(info
->driver
, DRIVER_NAME
, sizeof(info
->driver
));
1258 strncpy(info
->version
, DRIVER_VERSION
, sizeof(info
->version
));
1259 usb_make_path(dev
->udev
, info
->bus_info
, sizeof(info
->bus_info
));
1262 static u32
lan78xx_get_msglevel(struct net_device
*net
)
1264 struct lan78xx_net
*dev
= netdev_priv(net
);
1266 return dev
->msg_enable
;
1269 static void lan78xx_set_msglevel(struct net_device
*net
, u32 level
)
1271 struct lan78xx_net
*dev
= netdev_priv(net
);
1273 dev
->msg_enable
= level
;
1276 static int lan78xx_get_mdix_status(struct net_device
*net
)
1278 struct phy_device
*phydev
= net
->phydev
;
1281 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
, LAN88XX_EXT_PAGE_SPACE_1
);
1282 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1283 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
, LAN88XX_EXT_PAGE_SPACE_0
);
1288 static void lan78xx_set_mdix_status(struct net_device
*net
, __u8 mdix_ctrl
)
1290 struct lan78xx_net
*dev
= netdev_priv(net
);
1291 struct phy_device
*phydev
= net
->phydev
;
1294 if (mdix_ctrl
== ETH_TP_MDI
) {
1295 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1296 LAN88XX_EXT_PAGE_SPACE_1
);
1297 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1298 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1299 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1300 buf
| LAN88XX_EXT_MODE_CTRL_MDI_
);
1301 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1302 LAN88XX_EXT_PAGE_SPACE_0
);
1303 } else if (mdix_ctrl
== ETH_TP_MDI_X
) {
1304 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1305 LAN88XX_EXT_PAGE_SPACE_1
);
1306 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1307 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1308 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1309 buf
| LAN88XX_EXT_MODE_CTRL_MDI_X_
);
1310 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1311 LAN88XX_EXT_PAGE_SPACE_0
);
1312 } else if (mdix_ctrl
== ETH_TP_MDI_AUTO
) {
1313 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1314 LAN88XX_EXT_PAGE_SPACE_1
);
1315 buf
= phy_read(phydev
, LAN88XX_EXT_MODE_CTRL
);
1316 buf
&= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1317 phy_write(phydev
, LAN88XX_EXT_MODE_CTRL
,
1318 buf
| LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_
);
1319 phy_write(phydev
, LAN88XX_EXT_PAGE_ACCESS
,
1320 LAN88XX_EXT_PAGE_SPACE_0
);
1322 dev
->mdix_ctrl
= mdix_ctrl
;
1325 static int lan78xx_get_settings(struct net_device
*net
, struct ethtool_cmd
*cmd
)
1327 struct lan78xx_net
*dev
= netdev_priv(net
);
1328 struct phy_device
*phydev
= net
->phydev
;
1332 ret
= usb_autopm_get_interface(dev
->intf
);
1336 ret
= phy_ethtool_gset(phydev
, cmd
);
1338 buf
= lan78xx_get_mdix_status(net
);
1340 buf
&= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_
;
1341 if (buf
== LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_
) {
1342 cmd
->eth_tp_mdix
= ETH_TP_MDI_AUTO
;
1343 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_AUTO
;
1344 } else if (buf
== LAN88XX_EXT_MODE_CTRL_MDI_
) {
1345 cmd
->eth_tp_mdix
= ETH_TP_MDI
;
1346 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI
;
1347 } else if (buf
== LAN88XX_EXT_MODE_CTRL_MDI_X_
) {
1348 cmd
->eth_tp_mdix
= ETH_TP_MDI_X
;
1349 cmd
->eth_tp_mdix_ctrl
= ETH_TP_MDI_X
;
1352 usb_autopm_put_interface(dev
->intf
);
1357 static int lan78xx_set_settings(struct net_device
*net
, struct ethtool_cmd
*cmd
)
1359 struct lan78xx_net
*dev
= netdev_priv(net
);
1360 struct phy_device
*phydev
= net
->phydev
;
1364 ret
= usb_autopm_get_interface(dev
->intf
);
1368 if (dev
->mdix_ctrl
!= cmd
->eth_tp_mdix_ctrl
) {
1369 lan78xx_set_mdix_status(net
, cmd
->eth_tp_mdix_ctrl
);
1372 /* change speed & duplex */
1373 ret
= phy_ethtool_sset(phydev
, cmd
);
1375 if (!cmd
->autoneg
) {
1376 /* force link down */
1377 temp
= phy_read(phydev
, MII_BMCR
);
1378 phy_write(phydev
, MII_BMCR
, temp
| BMCR_LOOPBACK
);
1380 phy_write(phydev
, MII_BMCR
, temp
);
1383 usb_autopm_put_interface(dev
->intf
);
1388 static const struct ethtool_ops lan78xx_ethtool_ops
= {
1389 .get_link
= lan78xx_get_link
,
1390 .nway_reset
= lan78xx_nway_reset
,
1391 .get_drvinfo
= lan78xx_get_drvinfo
,
1392 .get_msglevel
= lan78xx_get_msglevel
,
1393 .set_msglevel
= lan78xx_set_msglevel
,
1394 .get_settings
= lan78xx_get_settings
,
1395 .set_settings
= lan78xx_set_settings
,
1396 .get_eeprom_len
= lan78xx_ethtool_get_eeprom_len
,
1397 .get_eeprom
= lan78xx_ethtool_get_eeprom
,
1398 .set_eeprom
= lan78xx_ethtool_set_eeprom
,
1399 .get_ethtool_stats
= lan78xx_get_stats
,
1400 .get_sset_count
= lan78xx_get_sset_count
,
1401 .get_strings
= lan78xx_get_strings
,
1402 .get_wol
= lan78xx_get_wol
,
1403 .set_wol
= lan78xx_set_wol
,
1404 .get_eee
= lan78xx_get_eee
,
1405 .set_eee
= lan78xx_set_eee
,
1408 static int lan78xx_ioctl(struct net_device
*netdev
, struct ifreq
*rq
, int cmd
)
1410 if (!netif_running(netdev
))
1413 return phy_mii_ioctl(netdev
->phydev
, rq
, cmd
);
1416 static void lan78xx_init_mac_address(struct lan78xx_net
*dev
)
1418 u32 addr_lo
, addr_hi
;
1422 ret
= lan78xx_read_reg(dev
, RX_ADDRL
, &addr_lo
);
1423 ret
= lan78xx_read_reg(dev
, RX_ADDRH
, &addr_hi
);
1425 addr
[0] = addr_lo
& 0xFF;
1426 addr
[1] = (addr_lo
>> 8) & 0xFF;
1427 addr
[2] = (addr_lo
>> 16) & 0xFF;
1428 addr
[3] = (addr_lo
>> 24) & 0xFF;
1429 addr
[4] = addr_hi
& 0xFF;
1430 addr
[5] = (addr_hi
>> 8) & 0xFF;
1432 if (!is_valid_ether_addr(addr
)) {
1433 /* reading mac address from EEPROM or OTP */
1434 if ((lan78xx_read_eeprom(dev
, EEPROM_MAC_OFFSET
, ETH_ALEN
,
1436 (lan78xx_read_otp(dev
, EEPROM_MAC_OFFSET
, ETH_ALEN
,
1438 if (is_valid_ether_addr(addr
)) {
1439 /* eeprom values are valid so use them */
1440 netif_dbg(dev
, ifup
, dev
->net
,
1441 "MAC address read from EEPROM");
1443 /* generate random MAC */
1444 random_ether_addr(addr
);
1445 netif_dbg(dev
, ifup
, dev
->net
,
1446 "MAC address set to random addr");
1449 addr_lo
= addr
[0] | (addr
[1] << 8) |
1450 (addr
[2] << 16) | (addr
[3] << 24);
1451 addr_hi
= addr
[4] | (addr
[5] << 8);
1453 ret
= lan78xx_write_reg(dev
, RX_ADDRL
, addr_lo
);
1454 ret
= lan78xx_write_reg(dev
, RX_ADDRH
, addr_hi
);
1456 /* generate random MAC */
1457 random_ether_addr(addr
);
1458 netif_dbg(dev
, ifup
, dev
->net
,
1459 "MAC address set to random addr");
1463 ret
= lan78xx_write_reg(dev
, MAF_LO(0), addr_lo
);
1464 ret
= lan78xx_write_reg(dev
, MAF_HI(0), addr_hi
| MAF_HI_VALID_
);
1466 ether_addr_copy(dev
->net
->dev_addr
, addr
);
1469 /* MDIO read and write wrappers for phylib */
1470 static int lan78xx_mdiobus_read(struct mii_bus
*bus
, int phy_id
, int idx
)
1472 struct lan78xx_net
*dev
= bus
->priv
;
1476 ret
= usb_autopm_get_interface(dev
->intf
);
1480 mutex_lock(&dev
->phy_mutex
);
1482 /* confirm MII not busy */
1483 ret
= lan78xx_phy_wait_not_busy(dev
);
1487 /* set the address, index & direction (read from PHY) */
1488 addr
= mii_access(phy_id
, idx
, MII_READ
);
1489 ret
= lan78xx_write_reg(dev
, MII_ACC
, addr
);
1491 ret
= lan78xx_phy_wait_not_busy(dev
);
1495 ret
= lan78xx_read_reg(dev
, MII_DATA
, &val
);
1497 ret
= (int)(val
& 0xFFFF);
1500 mutex_unlock(&dev
->phy_mutex
);
1501 usb_autopm_put_interface(dev
->intf
);
1505 static int lan78xx_mdiobus_write(struct mii_bus
*bus
, int phy_id
, int idx
,
1508 struct lan78xx_net
*dev
= bus
->priv
;
1512 ret
= usb_autopm_get_interface(dev
->intf
);
1516 mutex_lock(&dev
->phy_mutex
);
1518 /* confirm MII not busy */
1519 ret
= lan78xx_phy_wait_not_busy(dev
);
1524 ret
= lan78xx_write_reg(dev
, MII_DATA
, val
);
1526 /* set the address, index & direction (write to PHY) */
1527 addr
= mii_access(phy_id
, idx
, MII_WRITE
);
1528 ret
= lan78xx_write_reg(dev
, MII_ACC
, addr
);
1530 ret
= lan78xx_phy_wait_not_busy(dev
);
1535 mutex_unlock(&dev
->phy_mutex
);
1536 usb_autopm_put_interface(dev
->intf
);
1540 static int lan78xx_mdio_init(struct lan78xx_net
*dev
)
1544 dev
->mdiobus
= mdiobus_alloc();
1545 if (!dev
->mdiobus
) {
1546 netdev_err(dev
->net
, "can't allocate MDIO bus\n");
1550 dev
->mdiobus
->priv
= (void *)dev
;
1551 dev
->mdiobus
->read
= lan78xx_mdiobus_read
;
1552 dev
->mdiobus
->write
= lan78xx_mdiobus_write
;
1553 dev
->mdiobus
->name
= "lan78xx-mdiobus";
1555 snprintf(dev
->mdiobus
->id
, MII_BUS_ID_SIZE
, "usb-%03d:%03d",
1556 dev
->udev
->bus
->busnum
, dev
->udev
->devnum
);
1558 switch (dev
->devid
& ID_REV_CHIP_ID_MASK_
) {
1561 /* set to internal PHY id */
1562 dev
->mdiobus
->phy_mask
= ~(1 << 1);
1566 ret
= mdiobus_register(dev
->mdiobus
);
1568 netdev_err(dev
->net
, "can't register MDIO bus\n");
1572 netdev_dbg(dev
->net
, "registered mdiobus bus %s\n", dev
->mdiobus
->id
);
1575 mdiobus_free(dev
->mdiobus
);
1579 static void lan78xx_remove_mdio(struct lan78xx_net
*dev
)
1581 mdiobus_unregister(dev
->mdiobus
);
1582 mdiobus_free(dev
->mdiobus
);
1585 static void lan78xx_link_status_change(struct net_device
*net
)
1590 static int lan78xx_phy_init(struct lan78xx_net
*dev
)
1593 struct phy_device
*phydev
= dev
->net
->phydev
;
1595 phydev
= phy_find_first(dev
->mdiobus
);
1597 netdev_err(dev
->net
, "no PHY found\n");
1601 /* Enable PHY interrupts.
1602 * We handle our own interrupt
1604 ret
= phy_read(phydev
, LAN88XX_INT_STS
);
1605 ret
= phy_write(phydev
, LAN88XX_INT_MASK
,
1606 LAN88XX_INT_MASK_MDINTPIN_EN_
|
1607 LAN88XX_INT_MASK_LINK_CHANGE_
);
1609 phydev
->irq
= PHY_IGNORE_INTERRUPT
;
1611 ret
= phy_connect_direct(dev
->net
, phydev
,
1612 lan78xx_link_status_change
,
1613 PHY_INTERFACE_MODE_GMII
);
1615 netdev_err(dev
->net
, "can't attach PHY to %s\n",
1620 /* set to AUTOMDIX */
1621 lan78xx_set_mdix_status(dev
->net
, ETH_TP_MDI_AUTO
);
1623 /* MAC doesn't support 1000T Half */
1624 phydev
->supported
&= ~SUPPORTED_1000baseT_Half
;
1625 phydev
->supported
|= (SUPPORTED_10baseT_Half
|
1626 SUPPORTED_10baseT_Full
|
1627 SUPPORTED_100baseT_Half
|
1628 SUPPORTED_100baseT_Full
|
1629 SUPPORTED_1000baseT_Full
|
1630 SUPPORTED_Pause
| SUPPORTED_Asym_Pause
);
1631 genphy_config_aneg(phydev
);
1635 netif_dbg(dev
, ifup
, dev
->net
, "phy initialised successfully");
1640 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net
*dev
, int size
)
1646 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
1648 rxenabled
= ((buf
& MAC_RX_RXEN_
) != 0);
1651 buf
&= ~MAC_RX_RXEN_
;
1652 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1655 /* add 4 to size for FCS */
1656 buf
&= ~MAC_RX_MAX_SIZE_MASK_
;
1657 buf
|= (((size
+ 4) << MAC_RX_MAX_SIZE_SHIFT_
) & MAC_RX_MAX_SIZE_MASK_
);
1659 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1662 buf
|= MAC_RX_RXEN_
;
1663 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
1669 static int unlink_urbs(struct lan78xx_net
*dev
, struct sk_buff_head
*q
)
1671 struct sk_buff
*skb
;
1672 unsigned long flags
;
1675 spin_lock_irqsave(&q
->lock
, flags
);
1676 while (!skb_queue_empty(q
)) {
1677 struct skb_data
*entry
;
1681 skb_queue_walk(q
, skb
) {
1682 entry
= (struct skb_data
*)skb
->cb
;
1683 if (entry
->state
!= unlink_start
)
1688 entry
->state
= unlink_start
;
1691 /* Get reference count of the URB to avoid it to be
1692 * freed during usb_unlink_urb, which may trigger
1693 * use-after-free problem inside usb_unlink_urb since
1694 * usb_unlink_urb is always racing with .complete
1695 * handler(include defer_bh).
1698 spin_unlock_irqrestore(&q
->lock
, flags
);
1699 /* during some PM-driven resume scenarios,
1700 * these (async) unlinks complete immediately
1702 ret
= usb_unlink_urb(urb
);
1703 if (ret
!= -EINPROGRESS
&& ret
!= 0)
1704 netdev_dbg(dev
->net
, "unlink urb err, %d\n", ret
);
1708 spin_lock_irqsave(&q
->lock
, flags
);
1710 spin_unlock_irqrestore(&q
->lock
, flags
);
1714 static int lan78xx_change_mtu(struct net_device
*netdev
, int new_mtu
)
1716 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1717 int ll_mtu
= new_mtu
+ netdev
->hard_header_len
;
1718 int old_hard_mtu
= dev
->hard_mtu
;
1719 int old_rx_urb_size
= dev
->rx_urb_size
;
1722 if (new_mtu
> MAX_SINGLE_PACKET_SIZE
)
1727 /* no second zero-length packet read wanted after mtu-sized packets */
1728 if ((ll_mtu
% dev
->maxpacket
) == 0)
1731 ret
= lan78xx_set_rx_max_frame_length(dev
, new_mtu
+ ETH_HLEN
);
1733 netdev
->mtu
= new_mtu
;
1735 dev
->hard_mtu
= netdev
->mtu
+ netdev
->hard_header_len
;
1736 if (dev
->rx_urb_size
== old_hard_mtu
) {
1737 dev
->rx_urb_size
= dev
->hard_mtu
;
1738 if (dev
->rx_urb_size
> old_rx_urb_size
) {
1739 if (netif_running(dev
->net
)) {
1740 unlink_urbs(dev
, &dev
->rxq
);
1741 tasklet_schedule(&dev
->bh
);
1749 int lan78xx_set_mac_addr(struct net_device
*netdev
, void *p
)
1751 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1752 struct sockaddr
*addr
= p
;
1753 u32 addr_lo
, addr_hi
;
1756 if (netif_running(netdev
))
1759 if (!is_valid_ether_addr(addr
->sa_data
))
1760 return -EADDRNOTAVAIL
;
1762 ether_addr_copy(netdev
->dev_addr
, addr
->sa_data
);
1764 addr_lo
= netdev
->dev_addr
[0] |
1765 netdev
->dev_addr
[1] << 8 |
1766 netdev
->dev_addr
[2] << 16 |
1767 netdev
->dev_addr
[3] << 24;
1768 addr_hi
= netdev
->dev_addr
[4] |
1769 netdev
->dev_addr
[5] << 8;
1771 ret
= lan78xx_write_reg(dev
, RX_ADDRL
, addr_lo
);
1772 ret
= lan78xx_write_reg(dev
, RX_ADDRH
, addr_hi
);
1777 /* Enable or disable Rx checksum offload engine */
1778 static int lan78xx_set_features(struct net_device
*netdev
,
1779 netdev_features_t features
)
1781 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1782 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1783 unsigned long flags
;
1786 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
1788 if (features
& NETIF_F_RXCSUM
) {
1789 pdata
->rfe_ctl
|= RFE_CTL_TCPUDP_COE_
| RFE_CTL_IP_COE_
;
1790 pdata
->rfe_ctl
|= RFE_CTL_ICMP_COE_
| RFE_CTL_IGMP_COE_
;
1792 pdata
->rfe_ctl
&= ~(RFE_CTL_TCPUDP_COE_
| RFE_CTL_IP_COE_
);
1793 pdata
->rfe_ctl
&= ~(RFE_CTL_ICMP_COE_
| RFE_CTL_IGMP_COE_
);
1796 if (features
& NETIF_F_HW_VLAN_CTAG_RX
)
1797 pdata
->rfe_ctl
|= RFE_CTL_VLAN_FILTER_
;
1799 pdata
->rfe_ctl
&= ~RFE_CTL_VLAN_FILTER_
;
1801 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
1803 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
1808 static void lan78xx_deferred_vlan_write(struct work_struct
*param
)
1810 struct lan78xx_priv
*pdata
=
1811 container_of(param
, struct lan78xx_priv
, set_vlan
);
1812 struct lan78xx_net
*dev
= pdata
->dev
;
1814 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, 0,
1815 DP_SEL_VHF_VLAN_LEN
, pdata
->vlan_table
);
1818 static int lan78xx_vlan_rx_add_vid(struct net_device
*netdev
,
1819 __be16 proto
, u16 vid
)
1821 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1822 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1824 u16 vid_dword_index
;
1826 vid_dword_index
= (vid
>> 5) & 0x7F;
1827 vid_bit_index
= vid
& 0x1F;
1829 pdata
->vlan_table
[vid_dword_index
] |= (1 << vid_bit_index
);
1831 /* defer register writes to a sleepable context */
1832 schedule_work(&pdata
->set_vlan
);
1837 static int lan78xx_vlan_rx_kill_vid(struct net_device
*netdev
,
1838 __be16 proto
, u16 vid
)
1840 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1841 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1843 u16 vid_dword_index
;
1845 vid_dword_index
= (vid
>> 5) & 0x7F;
1846 vid_bit_index
= vid
& 0x1F;
1848 pdata
->vlan_table
[vid_dword_index
] &= ~(1 << vid_bit_index
);
1850 /* defer register writes to a sleepable context */
1851 schedule_work(&pdata
->set_vlan
);
1856 static void lan78xx_init_ltm(struct lan78xx_net
*dev
)
1860 u32 regs
[6] = { 0 };
1862 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1863 if (buf
& USB_CFG1_LTM_ENABLE_
) {
1865 /* Get values from EEPROM first */
1866 if (lan78xx_read_eeprom(dev
, 0x3F, 2, temp
) == 0) {
1867 if (temp
[0] == 24) {
1868 ret
= lan78xx_read_raw_eeprom(dev
,
1875 } else if (lan78xx_read_otp(dev
, 0x3F, 2, temp
) == 0) {
1876 if (temp
[0] == 24) {
1877 ret
= lan78xx_read_raw_otp(dev
,
1887 lan78xx_write_reg(dev
, LTM_BELT_IDLE0
, regs
[0]);
1888 lan78xx_write_reg(dev
, LTM_BELT_IDLE1
, regs
[1]);
1889 lan78xx_write_reg(dev
, LTM_BELT_ACT0
, regs
[2]);
1890 lan78xx_write_reg(dev
, LTM_BELT_ACT1
, regs
[3]);
1891 lan78xx_write_reg(dev
, LTM_INACTIVE0
, regs
[4]);
1892 lan78xx_write_reg(dev
, LTM_INACTIVE1
, regs
[5]);
1895 static int lan78xx_reset(struct lan78xx_net
*dev
)
1897 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1900 unsigned long timeout
;
1902 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
1903 buf
|= HW_CFG_LRST_
;
1904 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
1906 timeout
= jiffies
+ HZ
;
1909 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
1910 if (time_after(jiffies
, timeout
)) {
1911 netdev_warn(dev
->net
,
1912 "timeout on completion of LiteReset");
1915 } while (buf
& HW_CFG_LRST_
);
1917 lan78xx_init_mac_address(dev
);
1919 /* save DEVID for later usage */
1920 ret
= lan78xx_read_reg(dev
, ID_REV
, &buf
);
1923 /* Respond to the IN token with a NAK */
1924 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1925 buf
|= USB_CFG_BIR_
;
1926 ret
= lan78xx_write_reg(dev
, USB_CFG0
, buf
);
1929 lan78xx_init_ltm(dev
);
1931 dev
->net
->hard_header_len
+= TX_OVERHEAD
;
1932 dev
->hard_mtu
= dev
->net
->mtu
+ dev
->net
->hard_header_len
;
1934 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
1935 buf
= DEFAULT_BURST_CAP_SIZE
/ SS_USB_PKT_SIZE
;
1936 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
1939 } else if (dev
->udev
->speed
== USB_SPEED_HIGH
) {
1940 buf
= DEFAULT_BURST_CAP_SIZE
/ HS_USB_PKT_SIZE
;
1941 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
1942 dev
->rx_qlen
= RX_MAX_QUEUE_MEMORY
/ dev
->rx_urb_size
;
1943 dev
->tx_qlen
= RX_MAX_QUEUE_MEMORY
/ dev
->hard_mtu
;
1945 buf
= DEFAULT_BURST_CAP_SIZE
/ FS_USB_PKT_SIZE
;
1946 dev
->rx_urb_size
= DEFAULT_BURST_CAP_SIZE
;
1950 ret
= lan78xx_write_reg(dev
, BURST_CAP
, buf
);
1951 ret
= lan78xx_write_reg(dev
, BULK_IN_DLY
, DEFAULT_BULK_IN_DELAY
);
1953 ret
= lan78xx_read_reg(dev
, HW_CFG
, &buf
);
1955 ret
= lan78xx_write_reg(dev
, HW_CFG
, buf
);
1957 ret
= lan78xx_read_reg(dev
, USB_CFG0
, &buf
);
1958 buf
|= USB_CFG_BCE_
;
1959 ret
= lan78xx_write_reg(dev
, USB_CFG0
, buf
);
1961 /* set FIFO sizes */
1962 buf
= (MAX_RX_FIFO_SIZE
- 512) / 512;
1963 ret
= lan78xx_write_reg(dev
, FCT_RX_FIFO_END
, buf
);
1965 buf
= (MAX_TX_FIFO_SIZE
- 512) / 512;
1966 ret
= lan78xx_write_reg(dev
, FCT_TX_FIFO_END
, buf
);
1968 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_CLEAR_ALL_
);
1969 ret
= lan78xx_write_reg(dev
, FLOW
, 0);
1970 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, 0);
1972 /* Don't need rfe_ctl_lock during initialisation */
1973 ret
= lan78xx_read_reg(dev
, RFE_CTL
, &pdata
->rfe_ctl
);
1974 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
| RFE_CTL_DA_PERFECT_
;
1975 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
1977 /* Enable or disable checksum offload engines */
1978 lan78xx_set_features(dev
->net
, dev
->net
->features
);
1980 lan78xx_set_multicast(dev
->net
);
1983 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
1984 buf
|= PMT_CTL_PHY_RST_
;
1985 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
1987 timeout
= jiffies
+ HZ
;
1990 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
1991 if (time_after(jiffies
, timeout
)) {
1992 netdev_warn(dev
->net
, "timeout waiting for PHY Reset");
1995 } while ((buf
& PMT_CTL_PHY_RST_
) || !(buf
& PMT_CTL_READY_
));
1997 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1998 buf
|= MAC_CR_AUTO_DUPLEX_
| MAC_CR_AUTO_SPEED_
;
1999 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
2001 /* enable PHY interrupts */
2002 ret
= lan78xx_read_reg(dev
, INT_EP_CTL
, &buf
);
2003 buf
|= INT_ENP_PHY_INT
;
2004 ret
= lan78xx_write_reg(dev
, INT_EP_CTL
, buf
);
2006 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
2007 buf
|= MAC_TX_TXEN_
;
2008 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
2010 ret
= lan78xx_read_reg(dev
, FCT_TX_CTL
, &buf
);
2011 buf
|= FCT_TX_CTL_EN_
;
2012 ret
= lan78xx_write_reg(dev
, FCT_TX_CTL
, buf
);
2014 ret
= lan78xx_set_rx_max_frame_length(dev
, dev
->net
->mtu
+ ETH_HLEN
);
2016 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
2017 buf
|= MAC_RX_RXEN_
;
2018 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
2020 ret
= lan78xx_read_reg(dev
, FCT_RX_CTL
, &buf
);
2021 buf
|= FCT_RX_CTL_EN_
;
2022 ret
= lan78xx_write_reg(dev
, FCT_RX_CTL
, buf
);
2027 static int lan78xx_open(struct net_device
*net
)
2029 struct lan78xx_net
*dev
= netdev_priv(net
);
2032 ret
= usb_autopm_get_interface(dev
->intf
);
2036 ret
= lan78xx_reset(dev
);
2040 ret
= lan78xx_phy_init(dev
);
2044 /* for Link Check */
2045 if (dev
->urb_intr
) {
2046 ret
= usb_submit_urb(dev
->urb_intr
, GFP_KERNEL
);
2048 netif_err(dev
, ifup
, dev
->net
,
2049 "intr submit %d\n", ret
);
2054 set_bit(EVENT_DEV_OPEN
, &dev
->flags
);
2056 netif_start_queue(net
);
2058 dev
->link_on
= false;
2060 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
2062 usb_autopm_put_interface(dev
->intf
);
2068 static void lan78xx_terminate_urbs(struct lan78xx_net
*dev
)
2070 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup
);
2071 DECLARE_WAITQUEUE(wait
, current
);
2074 /* ensure there are no more active urbs */
2075 add_wait_queue(&unlink_wakeup
, &wait
);
2076 set_current_state(TASK_UNINTERRUPTIBLE
);
2077 dev
->wait
= &unlink_wakeup
;
2078 temp
= unlink_urbs(dev
, &dev
->txq
) + unlink_urbs(dev
, &dev
->rxq
);
2080 /* maybe wait for deletions to finish. */
2081 while (!skb_queue_empty(&dev
->rxq
) &&
2082 !skb_queue_empty(&dev
->txq
) &&
2083 !skb_queue_empty(&dev
->done
)) {
2084 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS
));
2085 set_current_state(TASK_UNINTERRUPTIBLE
);
2086 netif_dbg(dev
, ifdown
, dev
->net
,
2087 "waited for %d urb completions\n", temp
);
2089 set_current_state(TASK_RUNNING
);
2091 remove_wait_queue(&unlink_wakeup
, &wait
);
2094 int lan78xx_stop(struct net_device
*net
)
2096 struct lan78xx_net
*dev
= netdev_priv(net
);
2098 phy_stop(net
->phydev
);
2099 phy_disconnect(net
->phydev
);
2102 clear_bit(EVENT_DEV_OPEN
, &dev
->flags
);
2103 netif_stop_queue(net
);
2105 netif_info(dev
, ifdown
, dev
->net
,
2106 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
2107 net
->stats
.rx_packets
, net
->stats
.tx_packets
,
2108 net
->stats
.rx_errors
, net
->stats
.tx_errors
);
2110 lan78xx_terminate_urbs(dev
);
2112 usb_kill_urb(dev
->urb_intr
);
2114 skb_queue_purge(&dev
->rxq_pause
);
2116 /* deferred work (task, timer, softirq) must also stop.
2117 * can't flush_scheduled_work() until we drop rtnl (later),
2118 * else workers could deadlock; so make workers a NOP.
2121 cancel_delayed_work_sync(&dev
->wq
);
2122 tasklet_kill(&dev
->bh
);
2124 usb_autopm_put_interface(dev
->intf
);
/* Flatten a fragmented skb so the TX command words can be prepended and
 * the payload sent as one contiguous bulk transfer.
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2134 static struct sk_buff
*lan78xx_tx_prep(struct lan78xx_net
*dev
,
2135 struct sk_buff
*skb
, gfp_t flags
)
2137 u32 tx_cmd_a
, tx_cmd_b
;
2139 if (skb_headroom(skb
) < TX_OVERHEAD
) {
2140 struct sk_buff
*skb2
;
2142 skb2
= skb_copy_expand(skb
, TX_OVERHEAD
, 0, flags
);
2143 dev_kfree_skb_any(skb
);
2149 if (lan78xx_linearize(skb
) < 0)
2152 tx_cmd_a
= (u32
)(skb
->len
& TX_CMD_A_LEN_MASK_
) | TX_CMD_A_FCS_
;
2154 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2155 tx_cmd_a
|= TX_CMD_A_IPE_
| TX_CMD_A_TPE_
;
2158 if (skb_is_gso(skb
)) {
2159 u16 mss
= max(skb_shinfo(skb
)->gso_size
, TX_CMD_B_MSS_MIN_
);
2161 tx_cmd_b
= (mss
<< TX_CMD_B_MSS_SHIFT_
) & TX_CMD_B_MSS_MASK_
;
2163 tx_cmd_a
|= TX_CMD_A_LSO_
;
2166 if (skb_vlan_tag_present(skb
)) {
2167 tx_cmd_a
|= TX_CMD_A_IVTG_
;
2168 tx_cmd_b
|= skb_vlan_tag_get(skb
) & TX_CMD_B_VTAG_MASK_
;
2172 cpu_to_le32s(&tx_cmd_b
);
2173 memcpy(skb
->data
, &tx_cmd_b
, 4);
2176 cpu_to_le32s(&tx_cmd_a
);
2177 memcpy(skb
->data
, &tx_cmd_a
, 4);
2182 static enum skb_state
defer_bh(struct lan78xx_net
*dev
, struct sk_buff
*skb
,
2183 struct sk_buff_head
*list
, enum skb_state state
)
2185 unsigned long flags
;
2186 enum skb_state old_state
;
2187 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2189 spin_lock_irqsave(&list
->lock
, flags
);
2190 old_state
= entry
->state
;
2191 entry
->state
= state
;
2193 __skb_unlink(skb
, list
);
2194 spin_unlock(&list
->lock
);
2195 spin_lock(&dev
->done
.lock
);
2197 __skb_queue_tail(&dev
->done
, skb
);
2198 if (skb_queue_len(&dev
->done
) == 1)
2199 tasklet_schedule(&dev
->bh
);
2200 spin_unlock_irqrestore(&dev
->done
.lock
, flags
);
2205 static void tx_complete(struct urb
*urb
)
2207 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
2208 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2209 struct lan78xx_net
*dev
= entry
->dev
;
2211 if (urb
->status
== 0) {
2212 dev
->net
->stats
.tx_packets
++;
2213 dev
->net
->stats
.tx_bytes
+= entry
->length
;
2215 dev
->net
->stats
.tx_errors
++;
2217 switch (urb
->status
) {
2219 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
2222 /* software-driven interface shutdown */
2230 netif_stop_queue(dev
->net
);
2233 netif_dbg(dev
, tx_err
, dev
->net
,
2234 "tx err %d\n", entry
->urb
->status
);
2239 usb_autopm_put_interface_async(dev
->intf
);
2241 defer_bh(dev
, skb
, &dev
->txq
, tx_done
);
2244 static void lan78xx_queue_skb(struct sk_buff_head
*list
,
2245 struct sk_buff
*newsk
, enum skb_state state
)
2247 struct skb_data
*entry
= (struct skb_data
*)newsk
->cb
;
2249 __skb_queue_tail(list
, newsk
);
2250 entry
->state
= state
;
2253 netdev_tx_t
lan78xx_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
2255 struct lan78xx_net
*dev
= netdev_priv(net
);
2256 struct sk_buff
*skb2
= NULL
;
2259 skb_tx_timestamp(skb
);
2260 skb2
= lan78xx_tx_prep(dev
, skb
, GFP_ATOMIC
);
2264 skb_queue_tail(&dev
->txq_pend
, skb2
);
2266 /* throttle TX patch at slower than SUPER SPEED USB */
2267 if ((dev
->udev
->speed
< USB_SPEED_SUPER
) &&
2268 (skb_queue_len(&dev
->txq_pend
) > 10))
2269 netif_stop_queue(net
);
2271 netif_dbg(dev
, tx_err
, dev
->net
,
2272 "lan78xx_tx_prep return NULL\n");
2273 dev
->net
->stats
.tx_errors
++;
2274 dev
->net
->stats
.tx_dropped
++;
2277 tasklet_schedule(&dev
->bh
);
2279 return NETDEV_TX_OK
;
2282 int lan78xx_get_endpoints(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2285 struct usb_host_interface
*alt
= NULL
;
2286 struct usb_host_endpoint
*in
= NULL
, *out
= NULL
;
2287 struct usb_host_endpoint
*status
= NULL
;
2289 for (tmp
= 0; tmp
< intf
->num_altsetting
; tmp
++) {
2295 alt
= intf
->altsetting
+ tmp
;
2297 for (ep
= 0; ep
< alt
->desc
.bNumEndpoints
; ep
++) {
2298 struct usb_host_endpoint
*e
;
2301 e
= alt
->endpoint
+ ep
;
2302 switch (e
->desc
.bmAttributes
) {
2303 case USB_ENDPOINT_XFER_INT
:
2304 if (!usb_endpoint_dir_in(&e
->desc
))
2308 case USB_ENDPOINT_XFER_BULK
:
2313 if (usb_endpoint_dir_in(&e
->desc
)) {
2316 else if (intr
&& !status
)
2326 if (!alt
|| !in
|| !out
)
2329 dev
->pipe_in
= usb_rcvbulkpipe(dev
->udev
,
2330 in
->desc
.bEndpointAddress
&
2331 USB_ENDPOINT_NUMBER_MASK
);
2332 dev
->pipe_out
= usb_sndbulkpipe(dev
->udev
,
2333 out
->desc
.bEndpointAddress
&
2334 USB_ENDPOINT_NUMBER_MASK
);
2335 dev
->ep_intr
= status
;
2340 static int lan78xx_bind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2342 struct lan78xx_priv
*pdata
= NULL
;
2346 ret
= lan78xx_get_endpoints(dev
, intf
);
2348 dev
->data
[0] = (unsigned long)kzalloc(sizeof(*pdata
), GFP_KERNEL
);
2350 pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2352 netdev_warn(dev
->net
, "Unable to allocate lan78xx_priv");
2358 spin_lock_init(&pdata
->rfe_ctl_lock
);
2359 mutex_init(&pdata
->dataport_mutex
);
2361 INIT_WORK(&pdata
->set_multicast
, lan78xx_deferred_multicast_write
);
2363 for (i
= 0; i
< DP_SEL_VHF_VLAN_LEN
; i
++)
2364 pdata
->vlan_table
[i
] = 0;
2366 INIT_WORK(&pdata
->set_vlan
, lan78xx_deferred_vlan_write
);
2368 dev
->net
->features
= 0;
2370 if (DEFAULT_TX_CSUM_ENABLE
)
2371 dev
->net
->features
|= NETIF_F_HW_CSUM
;
2373 if (DEFAULT_RX_CSUM_ENABLE
)
2374 dev
->net
->features
|= NETIF_F_RXCSUM
;
2376 if (DEFAULT_TSO_CSUM_ENABLE
)
2377 dev
->net
->features
|= NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_SG
;
2379 dev
->net
->hw_features
= dev
->net
->features
;
2381 /* Init all registers */
2382 ret
= lan78xx_reset(dev
);
2384 lan78xx_mdio_init(dev
);
2386 dev
->net
->flags
|= IFF_MULTICAST
;
2388 pdata
->wol
= WAKE_MAGIC
;
2393 static void lan78xx_unbind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2395 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2397 lan78xx_remove_mdio(dev
);
2400 netif_dbg(dev
, ifdown
, dev
->net
, "free pdata");
2407 static void lan78xx_rx_csum_offload(struct lan78xx_net
*dev
,
2408 struct sk_buff
*skb
,
2409 u32 rx_cmd_a
, u32 rx_cmd_b
)
2411 if (!(dev
->net
->features
& NETIF_F_RXCSUM
) ||
2412 unlikely(rx_cmd_a
& RX_CMD_A_ICSM_
)) {
2413 skb
->ip_summed
= CHECKSUM_NONE
;
2415 skb
->csum
= ntohs((u16
)(rx_cmd_b
>> RX_CMD_B_CSUM_SHIFT_
));
2416 skb
->ip_summed
= CHECKSUM_COMPLETE
;
2420 void lan78xx_skb_return(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2424 if (test_bit(EVENT_RX_PAUSED
, &dev
->flags
)) {
2425 skb_queue_tail(&dev
->rxq_pause
, skb
);
2429 skb
->protocol
= eth_type_trans(skb
, dev
->net
);
2430 dev
->net
->stats
.rx_packets
++;
2431 dev
->net
->stats
.rx_bytes
+= skb
->len
;
2433 netif_dbg(dev
, rx_status
, dev
->net
, "< rx, len %zu, type 0x%x\n",
2434 skb
->len
+ sizeof(struct ethhdr
), skb
->protocol
);
2435 memset(skb
->cb
, 0, sizeof(struct skb_data
));
2437 if (skb_defer_rx_timestamp(skb
))
2440 status
= netif_rx(skb
);
2441 if (status
!= NET_RX_SUCCESS
)
2442 netif_dbg(dev
, rx_err
, dev
->net
,
2443 "netif_rx status %d\n", status
);
2446 static int lan78xx_rx(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2448 if (skb
->len
< dev
->net
->hard_header_len
)
2451 while (skb
->len
> 0) {
2452 u32 rx_cmd_a
, rx_cmd_b
, align_count
, size
;
2454 struct sk_buff
*skb2
;
2455 unsigned char *packet
;
2457 memcpy(&rx_cmd_a
, skb
->data
, sizeof(rx_cmd_a
));
2458 le32_to_cpus(&rx_cmd_a
);
2459 skb_pull(skb
, sizeof(rx_cmd_a
));
2461 memcpy(&rx_cmd_b
, skb
->data
, sizeof(rx_cmd_b
));
2462 le32_to_cpus(&rx_cmd_b
);
2463 skb_pull(skb
, sizeof(rx_cmd_b
));
2465 memcpy(&rx_cmd_c
, skb
->data
, sizeof(rx_cmd_c
));
2466 le16_to_cpus(&rx_cmd_c
);
2467 skb_pull(skb
, sizeof(rx_cmd_c
));
2471 /* get the packet length */
2472 size
= (rx_cmd_a
& RX_CMD_A_LEN_MASK_
);
2473 align_count
= (4 - ((size
+ RXW_PADDING
) % 4)) % 4;
2475 if (unlikely(rx_cmd_a
& RX_CMD_A_RED_
)) {
2476 netif_dbg(dev
, rx_err
, dev
->net
,
2477 "Error rx_cmd_a=0x%08x", rx_cmd_a
);
2479 /* last frame in this batch */
2480 if (skb
->len
== size
) {
2481 lan78xx_rx_csum_offload(dev
, skb
,
2482 rx_cmd_a
, rx_cmd_b
);
2484 skb_trim(skb
, skb
->len
- 4); /* remove fcs */
2485 skb
->truesize
= size
+ sizeof(struct sk_buff
);
2490 skb2
= skb_clone(skb
, GFP_ATOMIC
);
2491 if (unlikely(!skb2
)) {
2492 netdev_warn(dev
->net
, "Error allocating skb");
2497 skb2
->data
= packet
;
2498 skb_set_tail_pointer(skb2
, size
);
2500 lan78xx_rx_csum_offload(dev
, skb2
, rx_cmd_a
, rx_cmd_b
);
2502 skb_trim(skb2
, skb2
->len
- 4); /* remove fcs */
2503 skb2
->truesize
= size
+ sizeof(struct sk_buff
);
2505 lan78xx_skb_return(dev
, skb2
);
2508 skb_pull(skb
, size
);
2510 /* padding bytes before the next frame starts */
2512 skb_pull(skb
, align_count
);
2518 static inline void rx_process(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2520 if (!lan78xx_rx(dev
, skb
)) {
2521 dev
->net
->stats
.rx_errors
++;
2526 lan78xx_skb_return(dev
, skb
);
2530 netif_dbg(dev
, rx_err
, dev
->net
, "drop\n");
2531 dev
->net
->stats
.rx_errors
++;
2533 skb_queue_tail(&dev
->done
, skb
);
2536 static void rx_complete(struct urb
*urb
);
2538 static int rx_submit(struct lan78xx_net
*dev
, struct urb
*urb
, gfp_t flags
)
2540 struct sk_buff
*skb
;
2541 struct skb_data
*entry
;
2542 unsigned long lockflags
;
2543 size_t size
= dev
->rx_urb_size
;
2546 skb
= netdev_alloc_skb_ip_align(dev
->net
, size
);
2552 entry
= (struct skb_data
*)skb
->cb
;
2557 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_in
,
2558 skb
->data
, size
, rx_complete
, skb
);
2560 spin_lock_irqsave(&dev
->rxq
.lock
, lockflags
);
2562 if (netif_device_present(dev
->net
) &&
2563 netif_running(dev
->net
) &&
2564 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
2565 !test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
2566 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
2569 lan78xx_queue_skb(&dev
->rxq
, skb
, rx_start
);
2572 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
2575 netif_dbg(dev
, ifdown
, dev
->net
, "device gone\n");
2576 netif_device_detach(dev
->net
);
2582 netif_dbg(dev
, rx_err
, dev
->net
,
2583 "rx submit, %d\n", ret
);
2584 tasklet_schedule(&dev
->bh
);
2587 netif_dbg(dev
, ifdown
, dev
->net
, "rx: stopped\n");
2590 spin_unlock_irqrestore(&dev
->rxq
.lock
, lockflags
);
2592 dev_kfree_skb_any(skb
);
2598 static void rx_complete(struct urb
*urb
)
2600 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
2601 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2602 struct lan78xx_net
*dev
= entry
->dev
;
2603 int urb_status
= urb
->status
;
2604 enum skb_state state
;
2606 skb_put(skb
, urb
->actual_length
);
2610 switch (urb_status
) {
2612 if (skb
->len
< dev
->net
->hard_header_len
) {
2614 dev
->net
->stats
.rx_errors
++;
2615 dev
->net
->stats
.rx_length_errors
++;
2616 netif_dbg(dev
, rx_err
, dev
->net
,
2617 "rx length %d\n", skb
->len
);
2619 usb_mark_last_busy(dev
->udev
);
2622 dev
->net
->stats
.rx_errors
++;
2623 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
2625 case -ECONNRESET
: /* async unlink */
2626 case -ESHUTDOWN
: /* hardware gone */
2627 netif_dbg(dev
, ifdown
, dev
->net
,
2628 "rx shutdown, code %d\n", urb_status
);
2636 dev
->net
->stats
.rx_errors
++;
2642 /* data overrun ... flush fifo? */
2644 dev
->net
->stats
.rx_over_errors
++;
2649 dev
->net
->stats
.rx_errors
++;
2650 netif_dbg(dev
, rx_err
, dev
->net
, "rx status %d\n", urb_status
);
2654 state
= defer_bh(dev
, skb
, &dev
->rxq
, state
);
2657 if (netif_running(dev
->net
) &&
2658 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
2659 state
!= unlink_start
) {
2660 rx_submit(dev
, urb
, GFP_ATOMIC
);
2665 netif_dbg(dev
, rx_err
, dev
->net
, "no read resubmitted\n");
2668 static void lan78xx_tx_bh(struct lan78xx_net
*dev
)
2671 struct urb
*urb
= NULL
;
2672 struct skb_data
*entry
;
2673 unsigned long flags
;
2674 struct sk_buff_head
*tqp
= &dev
->txq_pend
;
2675 struct sk_buff
*skb
, *skb2
;
2678 int skb_totallen
, pkt_cnt
;
2682 for (skb
= tqp
->next
; pkt_cnt
< tqp
->qlen
; skb
= skb
->next
) {
2683 if (skb_is_gso(skb
)) {
2685 /* handle previous packets first */
2689 skb2
= skb_dequeue(tqp
);
2693 if ((skb_totallen
+ skb
->len
) > MAX_SINGLE_PACKET_SIZE
)
2695 skb_totallen
= skb
->len
+ roundup(skb_totallen
, sizeof(u32
));
2699 /* copy to a single skb */
2700 skb
= alloc_skb(skb_totallen
, GFP_ATOMIC
);
2704 skb_put(skb
, skb_totallen
);
2706 for (count
= pos
= 0; count
< pkt_cnt
; count
++) {
2707 skb2
= skb_dequeue(tqp
);
2709 memcpy(skb
->data
+ pos
, skb2
->data
, skb2
->len
);
2710 pos
+= roundup(skb2
->len
, sizeof(u32
));
2711 dev_kfree_skb(skb2
);
2715 length
= skb_totallen
;
2718 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
2720 netif_dbg(dev
, tx_err
, dev
->net
, "no urb\n");
2724 entry
= (struct skb_data
*)skb
->cb
;
2727 entry
->length
= length
;
2729 spin_lock_irqsave(&dev
->txq
.lock
, flags
);
2730 ret
= usb_autopm_get_interface_async(dev
->intf
);
2732 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
2736 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_out
,
2737 skb
->data
, skb
->len
, tx_complete
, skb
);
2739 if (length
% dev
->maxpacket
== 0) {
2740 /* send USB_ZERO_PACKET */
2741 urb
->transfer_flags
|= URB_ZERO_PACKET
;
2745 /* if this triggers the device is still a sleep */
2746 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
2747 /* transmission will be done in resume */
2748 usb_anchor_urb(urb
, &dev
->deferred
);
2749 /* no use to process more packets */
2750 netif_stop_queue(dev
->net
);
2752 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
2753 netdev_dbg(dev
->net
, "Delaying transmission for resumption\n");
2758 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
2761 dev
->net
->trans_start
= jiffies
;
2762 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
2763 if (skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
)
2764 netif_stop_queue(dev
->net
);
2767 netif_stop_queue(dev
->net
);
2768 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
2769 usb_autopm_put_interface_async(dev
->intf
);
2772 usb_autopm_put_interface_async(dev
->intf
);
2773 netif_dbg(dev
, tx_err
, dev
->net
,
2774 "tx: submit urb err %d\n", ret
);
2778 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
2781 netif_dbg(dev
, tx_err
, dev
->net
, "drop, code %d\n", ret
);
2783 dev
->net
->stats
.tx_dropped
++;
2785 dev_kfree_skb_any(skb
);
2788 netif_dbg(dev
, tx_queued
, dev
->net
,
2789 "> tx, len %d, type 0x%x\n", length
, skb
->protocol
);
2792 static void lan78xx_rx_bh(struct lan78xx_net
*dev
)
2797 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
) {
2798 for (i
= 0; i
< 10; i
++) {
2799 if (skb_queue_len(&dev
->rxq
) >= dev
->rx_qlen
)
2801 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
2803 if (rx_submit(dev
, urb
, GFP_ATOMIC
) == -ENOLINK
)
2807 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
)
2808 tasklet_schedule(&dev
->bh
);
2810 if (skb_queue_len(&dev
->txq
) < dev
->tx_qlen
)
2811 netif_wake_queue(dev
->net
);
2814 static void lan78xx_bh(unsigned long param
)
2816 struct lan78xx_net
*dev
= (struct lan78xx_net
*)param
;
2817 struct sk_buff
*skb
;
2818 struct skb_data
*entry
;
2820 while ((skb
= skb_dequeue(&dev
->done
))) {
2821 entry
= (struct skb_data
*)(skb
->cb
);
2822 switch (entry
->state
) {
2824 entry
->state
= rx_cleanup
;
2825 rx_process(dev
, skb
);
2828 usb_free_urb(entry
->urb
);
2832 usb_free_urb(entry
->urb
);
2836 netdev_dbg(dev
->net
, "skb state %d\n", entry
->state
);
2841 if (netif_device_present(dev
->net
) && netif_running(dev
->net
)) {
2842 if (!skb_queue_empty(&dev
->txq_pend
))
2845 if (!timer_pending(&dev
->delay
) &&
2846 !test_bit(EVENT_RX_HALT
, &dev
->flags
))
2851 static void lan78xx_delayedwork(struct work_struct
*work
)
2854 struct lan78xx_net
*dev
;
2856 dev
= container_of(work
, struct lan78xx_net
, wq
.work
);
2858 if (test_bit(EVENT_TX_HALT
, &dev
->flags
)) {
2859 unlink_urbs(dev
, &dev
->txq
);
2860 status
= usb_autopm_get_interface(dev
->intf
);
2863 status
= usb_clear_halt(dev
->udev
, dev
->pipe_out
);
2864 usb_autopm_put_interface(dev
->intf
);
2867 status
!= -ESHUTDOWN
) {
2868 if (netif_msg_tx_err(dev
))
2870 netdev_err(dev
->net
,
2871 "can't clear tx halt, status %d\n",
2874 clear_bit(EVENT_TX_HALT
, &dev
->flags
);
2875 if (status
!= -ESHUTDOWN
)
2876 netif_wake_queue(dev
->net
);
2879 if (test_bit(EVENT_RX_HALT
, &dev
->flags
)) {
2880 unlink_urbs(dev
, &dev
->rxq
);
2881 status
= usb_autopm_get_interface(dev
->intf
);
2884 status
= usb_clear_halt(dev
->udev
, dev
->pipe_in
);
2885 usb_autopm_put_interface(dev
->intf
);
2888 status
!= -ESHUTDOWN
) {
2889 if (netif_msg_rx_err(dev
))
2891 netdev_err(dev
->net
,
2892 "can't clear rx halt, status %d\n",
2895 clear_bit(EVENT_RX_HALT
, &dev
->flags
);
2896 tasklet_schedule(&dev
->bh
);
2900 if (test_bit(EVENT_LINK_RESET
, &dev
->flags
)) {
2903 clear_bit(EVENT_LINK_RESET
, &dev
->flags
);
2904 status
= usb_autopm_get_interface(dev
->intf
);
2907 if (lan78xx_link_reset(dev
) < 0) {
2908 usb_autopm_put_interface(dev
->intf
);
2910 netdev_info(dev
->net
, "link reset failed (%d)\n",
2913 usb_autopm_put_interface(dev
->intf
);
2918 static void intr_complete(struct urb
*urb
)
2920 struct lan78xx_net
*dev
= urb
->context
;
2921 int status
= urb
->status
;
2926 lan78xx_status(dev
, urb
);
2929 /* software-driven interface shutdown */
2930 case -ENOENT
: /* urb killed */
2931 case -ESHUTDOWN
: /* hardware gone */
2932 netif_dbg(dev
, ifdown
, dev
->net
,
2933 "intr shutdown, code %d\n", status
);
2936 /* NOTE: not throttling like RX/TX, since this endpoint
2937 * already polls infrequently
2940 netdev_dbg(dev
->net
, "intr status %d\n", status
);
2944 if (!netif_running(dev
->net
))
2947 memset(urb
->transfer_buffer
, 0, urb
->transfer_buffer_length
);
2948 status
= usb_submit_urb(urb
, GFP_ATOMIC
);
2950 netif_err(dev
, timer
, dev
->net
,
2951 "intr resubmit --> %d\n", status
);
2954 static void lan78xx_disconnect(struct usb_interface
*intf
)
2956 struct lan78xx_net
*dev
;
2957 struct usb_device
*udev
;
2958 struct net_device
*net
;
2960 dev
= usb_get_intfdata(intf
);
2961 usb_set_intfdata(intf
, NULL
);
2965 udev
= interface_to_usbdev(intf
);
2968 unregister_netdev(net
);
2970 cancel_delayed_work_sync(&dev
->wq
);
2972 usb_scuttle_anchored_urbs(&dev
->deferred
);
2974 lan78xx_unbind(dev
, intf
);
2976 usb_kill_urb(dev
->urb_intr
);
2977 usb_free_urb(dev
->urb_intr
);
2983 void lan78xx_tx_timeout(struct net_device
*net
)
2985 struct lan78xx_net
*dev
= netdev_priv(net
);
2987 unlink_urbs(dev
, &dev
->txq
);
2988 tasklet_schedule(&dev
->bh
);
2991 static const struct net_device_ops lan78xx_netdev_ops
= {
2992 .ndo_open
= lan78xx_open
,
2993 .ndo_stop
= lan78xx_stop
,
2994 .ndo_start_xmit
= lan78xx_start_xmit
,
2995 .ndo_tx_timeout
= lan78xx_tx_timeout
,
2996 .ndo_change_mtu
= lan78xx_change_mtu
,
2997 .ndo_set_mac_address
= lan78xx_set_mac_addr
,
2998 .ndo_validate_addr
= eth_validate_addr
,
2999 .ndo_do_ioctl
= lan78xx_ioctl
,
3000 .ndo_set_rx_mode
= lan78xx_set_multicast
,
3001 .ndo_set_features
= lan78xx_set_features
,
3002 .ndo_vlan_rx_add_vid
= lan78xx_vlan_rx_add_vid
,
3003 .ndo_vlan_rx_kill_vid
= lan78xx_vlan_rx_kill_vid
,
3006 static int lan78xx_probe(struct usb_interface
*intf
,
3007 const struct usb_device_id
*id
)
3009 struct lan78xx_net
*dev
;
3010 struct net_device
*netdev
;
3011 struct usb_device
*udev
;
3017 udev
= interface_to_usbdev(intf
);
3018 udev
= usb_get_dev(udev
);
3021 netdev
= alloc_etherdev(sizeof(struct lan78xx_net
));
3023 dev_err(&intf
->dev
, "Error: OOM\n");
3027 /* netdev_printk() needs this */
3028 SET_NETDEV_DEV(netdev
, &intf
->dev
);
3030 dev
= netdev_priv(netdev
);
3034 dev
->msg_enable
= netif_msg_init(msg_level
, NETIF_MSG_DRV
3035 | NETIF_MSG_PROBE
| NETIF_MSG_LINK
);
3037 skb_queue_head_init(&dev
->rxq
);
3038 skb_queue_head_init(&dev
->txq
);
3039 skb_queue_head_init(&dev
->done
);
3040 skb_queue_head_init(&dev
->rxq_pause
);
3041 skb_queue_head_init(&dev
->txq_pend
);
3042 mutex_init(&dev
->phy_mutex
);
3044 tasklet_init(&dev
->bh
, lan78xx_bh
, (unsigned long)dev
);
3045 INIT_DELAYED_WORK(&dev
->wq
, lan78xx_delayedwork
);
3046 init_usb_anchor(&dev
->deferred
);
3048 netdev
->netdev_ops
= &lan78xx_netdev_ops
;
3049 netdev
->watchdog_timeo
= TX_TIMEOUT_JIFFIES
;
3050 netdev
->ethtool_ops
= &lan78xx_ethtool_ops
;
3052 ret
= lan78xx_bind(dev
, intf
);
3055 strcpy(netdev
->name
, "eth%d");
3057 if (netdev
->mtu
> (dev
->hard_mtu
- netdev
->hard_header_len
))
3058 netdev
->mtu
= dev
->hard_mtu
- netdev
->hard_header_len
;
3060 dev
->ep_blkin
= (intf
->cur_altsetting
)->endpoint
+ 0;
3061 dev
->ep_blkout
= (intf
->cur_altsetting
)->endpoint
+ 1;
3062 dev
->ep_intr
= (intf
->cur_altsetting
)->endpoint
+ 2;
3064 dev
->pipe_in
= usb_rcvbulkpipe(udev
, BULK_IN_PIPE
);
3065 dev
->pipe_out
= usb_sndbulkpipe(udev
, BULK_OUT_PIPE
);
3067 dev
->pipe_intr
= usb_rcvintpipe(dev
->udev
,
3068 dev
->ep_intr
->desc
.bEndpointAddress
&
3069 USB_ENDPOINT_NUMBER_MASK
);
3070 period
= dev
->ep_intr
->desc
.bInterval
;
3072 maxp
= usb_maxpacket(dev
->udev
, dev
->pipe_intr
, 0);
3073 buf
= kmalloc(maxp
, GFP_KERNEL
);
3075 dev
->urb_intr
= usb_alloc_urb(0, GFP_KERNEL
);
3076 if (!dev
->urb_intr
) {
3080 usb_fill_int_urb(dev
->urb_intr
, dev
->udev
,
3081 dev
->pipe_intr
, buf
, maxp
,
3082 intr_complete
, dev
, period
);
3086 dev
->maxpacket
= usb_maxpacket(dev
->udev
, dev
->pipe_out
, 1);
3088 /* driver requires remote-wakeup capability during autosuspend. */
3089 intf
->needs_remote_wakeup
= 1;
3091 ret
= register_netdev(netdev
);
3093 netif_err(dev
, probe
, netdev
, "couldn't register the device\n");
3097 usb_set_intfdata(intf
, dev
);
3099 ret
= device_set_wakeup_enable(&udev
->dev
, true);
3101 /* Default delay of 2sec has more overhead than advantage.
3102 * Set to 10sec as default.
3104 pm_runtime_set_autosuspend_delay(&udev
->dev
,
3105 DEFAULT_AUTOSUSPEND_DELAY
);
3110 lan78xx_unbind(dev
, intf
);
3112 free_netdev(netdev
);
/* Compute the CRC16 (poly 0x8005, init 0xFFFF, LSB-first data feed)
 * the hardware expects for wake-frame filter configuration.
 *
 * Restores the inner shift/feedback statements lost in extraction.
 * NOTE(review): the crc |= 0x0001 after the polynomial XOR matches the
 * visible fragment; it is redundant with poly bit0 but kept for
 * fidelity to the hardware-vendor reference implementation.
 */
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 crc16poly = 0x8005;
	u16 crc = 0xFFFF;
	u16 data, msb;
	int i, bit;

	for (i = 0; i < len; i++) {
		data = (u16)buf[i];
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 15;
			crc <<= 1;

			if (msb ^ (u16)(data & 1)) {
				crc ^= crc16poly;
				crc |= (u16)0x0001U;
			}
			data >>= 1;
		}
	}

	return crc;
}
3144 static int lan78xx_set_suspend(struct lan78xx_net
*dev
, u32 wol
)
3152 const u8 ipv4_multicast
[3] = { 0x01, 0x00, 0x5E };
3153 const u8 ipv6_multicast
[3] = { 0x33, 0x33 };
3154 const u8 arp_type
[2] = { 0x08, 0x06 };
3156 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3157 buf
&= ~MAC_TX_TXEN_
;
3158 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3159 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3160 buf
&= ~MAC_RX_RXEN_
;
3161 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3163 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3164 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3165 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3170 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &temp_pmt_ctl
);
3171 temp_pmt_ctl
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3172 temp_pmt_ctl
|= PMT_CTL_RES_CLR_WKP_STS_
;
3174 for (mask_index
= 0; mask_index
< NUM_OF_WUF_CFG
; mask_index
++)
3175 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
), 0);
3178 if (wol
& WAKE_PHY
) {
3179 temp_pmt_ctl
|= PMT_CTL_PHY_WAKE_EN_
;
3181 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3182 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3183 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3185 if (wol
& WAKE_MAGIC
) {
3186 temp_wucsr
|= WUCSR_MPEN_
;
3188 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3189 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3190 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_3_
;
3192 if (wol
& WAKE_BCAST
) {
3193 temp_wucsr
|= WUCSR_BCST_EN_
;
3195 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3196 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3197 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3199 if (wol
& WAKE_MCAST
) {
3200 temp_wucsr
|= WUCSR_WAKE_EN_
;
3202 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3203 crc
= lan78xx_wakeframe_crc16(ipv4_multicast
, 3);
3204 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3206 WUF_CFGX_TYPE_MCAST_
|
3207 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3208 (crc
& WUF_CFGX_CRC16_MASK_
));
3210 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 7);
3211 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3212 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3213 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3216 /* for IPv6 Multicast */
3217 crc
= lan78xx_wakeframe_crc16(ipv6_multicast
, 2);
3218 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3220 WUF_CFGX_TYPE_MCAST_
|
3221 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3222 (crc
& WUF_CFGX_CRC16_MASK_
));
3224 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 3);
3225 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3226 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3227 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3230 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3231 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3232 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3234 if (wol
& WAKE_UCAST
) {
3235 temp_wucsr
|= WUCSR_PFDA_EN_
;
3237 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3238 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3239 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3241 if (wol
& WAKE_ARP
) {
3242 temp_wucsr
|= WUCSR_WAKE_EN_
;
3244 /* set WUF_CFG & WUF_MASK
3245 * for packettype (offset 12,13) = ARP (0x0806)
3247 crc
= lan78xx_wakeframe_crc16(arp_type
, 2);
3248 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3250 WUF_CFGX_TYPE_ALL_
|
3251 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3252 (crc
& WUF_CFGX_CRC16_MASK_
));
3254 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 0x3000);
3255 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3256 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3257 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3260 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3261 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3262 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3265 ret
= lan78xx_write_reg(dev
, WUCSR
, temp_wucsr
);
3267 /* when multiple WOL bits are set */
3268 if (hweight_long((unsigned long)wol
) > 1) {
3269 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3270 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3271 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3273 ret
= lan78xx_write_reg(dev
, PMT_CTL
, temp_pmt_ctl
);
3276 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3277 buf
|= PMT_CTL_WUPS_MASK_
;
3278 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3280 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3281 buf
|= MAC_RX_RXEN_
;
3282 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3287 int lan78xx_suspend(struct usb_interface
*intf
, pm_message_t message
)
3289 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3290 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
3295 event
= message
.event
;
3297 if (!dev
->suspend_count
++) {
3298 spin_lock_irq(&dev
->txq
.lock
);
3299 /* don't autosuspend while transmitting */
3300 if ((skb_queue_len(&dev
->txq
) ||
3301 skb_queue_len(&dev
->txq_pend
)) &&
3302 PMSG_IS_AUTO(message
)) {
3303 spin_unlock_irq(&dev
->txq
.lock
);
3307 set_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3308 spin_unlock_irq(&dev
->txq
.lock
);
3312 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3313 buf
&= ~MAC_TX_TXEN_
;
3314 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3315 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3316 buf
&= ~MAC_RX_RXEN_
;
3317 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3319 /* empty out the rx and queues */
3320 netif_device_detach(dev
->net
);
3321 lan78xx_terminate_urbs(dev
);
3322 usb_kill_urb(dev
->urb_intr
);
3325 netif_device_attach(dev
->net
);
3328 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3329 if (PMSG_IS_AUTO(message
)) {
3330 /* auto suspend (selective suspend) */
3331 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3332 buf
&= ~MAC_TX_TXEN_
;
3333 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3334 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3335 buf
&= ~MAC_RX_RXEN_
;
3336 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3338 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3339 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3340 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3342 /* set goodframe wakeup */
3343 ret
= lan78xx_read_reg(dev
, WUCSR
, &buf
);
3345 buf
|= WUCSR_RFE_WAKE_EN_
;
3346 buf
|= WUCSR_STORE_WAKE_
;
3348 ret
= lan78xx_write_reg(dev
, WUCSR
, buf
);
3350 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3352 buf
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3353 buf
|= PMT_CTL_RES_CLR_WKP_STS_
;
3355 buf
|= PMT_CTL_PHY_WAKE_EN_
;
3356 buf
|= PMT_CTL_WOL_EN_
;
3357 buf
&= ~PMT_CTL_SUS_MODE_MASK_
;
3358 buf
|= PMT_CTL_SUS_MODE_3_
;
3360 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3362 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3364 buf
|= PMT_CTL_WUPS_MASK_
;
3366 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3368 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3369 buf
|= MAC_RX_RXEN_
;
3370 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3372 lan78xx_set_suspend(dev
, pdata
->wol
);
3381 int lan78xx_resume(struct usb_interface
*intf
)
3383 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3384 struct sk_buff
*skb
;
3389 if (!--dev
->suspend_count
) {
3390 /* resume interrupt URBs */
3391 if (dev
->urb_intr
&& test_bit(EVENT_DEV_OPEN
, &dev
->flags
))
3392 usb_submit_urb(dev
->urb_intr
, GFP_NOIO
);
3394 spin_lock_irq(&dev
->txq
.lock
);
3395 while ((res
= usb_get_from_anchor(&dev
->deferred
))) {
3396 skb
= (struct sk_buff
*)res
->context
;
3397 ret
= usb_submit_urb(res
, GFP_ATOMIC
);
3399 dev_kfree_skb_any(skb
);
3401 usb_autopm_put_interface_async(dev
->intf
);
3403 dev
->net
->trans_start
= jiffies
;
3404 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
3408 clear_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3409 spin_unlock_irq(&dev
->txq
.lock
);
3411 if (test_bit(EVENT_DEV_OPEN
, &dev
->flags
)) {
3412 if (!(skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
))
3413 netif_start_queue(dev
->net
);
3414 tasklet_schedule(&dev
->bh
);
3418 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3419 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3420 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3422 ret
= lan78xx_write_reg(dev
, WUCSR2
, WUCSR2_NS_RCD_
|
3424 WUCSR2_IPV6_TCPSYN_RCD_
|
3425 WUCSR2_IPV4_TCPSYN_RCD_
);
3427 ret
= lan78xx_write_reg(dev
, WUCSR
, WUCSR_EEE_TX_WAKE_
|
3428 WUCSR_EEE_RX_WAKE_
|
3430 WUCSR_RFE_WAKE_FR_
|
3435 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3436 buf
|= MAC_TX_TXEN_
;
3437 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
/* USB reset_resume callback: the device lost its state across the
 * reset, so re-run the chip reset and PHY init before the normal
 * resume path.
 * NOTE(review): the lan78xx_reset() call sits on a line dropped by
 * extraction; reconstructed from the upstream driver — confirm.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_reset(dev);
	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3453 static const struct usb_device_id products
[] = {
3455 /* LAN7800 USB Gigabit Ethernet Device */
3456 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7800_USB_PRODUCT_ID
),
3459 /* LAN7850 USB Gigabit Ethernet Device */
3460 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7850_USB_PRODUCT_ID
),
3464 MODULE_DEVICE_TABLE(usb
, products
);
3466 static struct usb_driver lan78xx_driver
= {
3467 .name
= DRIVER_NAME
,
3468 .id_table
= products
,
3469 .probe
= lan78xx_probe
,
3470 .disconnect
= lan78xx_disconnect
,
3471 .suspend
= lan78xx_suspend
,
3472 .resume
= lan78xx_resume
,
3473 .reset_resume
= lan78xx_reset_resume
,
3474 .supports_autosuspend
= 1,
3475 .disable_hub_initiated_lpm
= 1,
3478 module_usb_driver(lan78xx_driver
);
3480 MODULE_AUTHOR(DRIVER_AUTHOR
);
3481 MODULE_DESCRIPTION(DRIVER_DESC
);
3482 MODULE_LICENSE("GPL");