2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/interrupt.h>
34 #include <linux/irqdomain.h>
35 #include <linux/irq.h>
36 #include <linux/irqchip/chained_irq.h>
37 #include <linux/microchipphy.h>
40 #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
41 #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices"
42 #define DRIVER_NAME "lan78xx"
43 #define DRIVER_VERSION "1.0.6"
45 #define TX_TIMEOUT_JIFFIES (5 * HZ)
46 #define THROTTLE_JIFFIES (HZ / 8)
47 #define UNLINK_TIMEOUT_MS 3
49 #define RX_MAX_QUEUE_MEMORY (60 * 1518)
51 #define SS_USB_PKT_SIZE (1024)
52 #define HS_USB_PKT_SIZE (512)
53 #define FS_USB_PKT_SIZE (64)
55 #define MAX_RX_FIFO_SIZE (12 * 1024)
56 #define MAX_TX_FIFO_SIZE (12 * 1024)
57 #define DEFAULT_BURST_CAP_SIZE (MAX_TX_FIFO_SIZE)
58 #define DEFAULT_BULK_IN_DELAY (0x0800)
59 #define MAX_SINGLE_PACKET_SIZE (9000)
60 #define DEFAULT_TX_CSUM_ENABLE (true)
61 #define DEFAULT_RX_CSUM_ENABLE (true)
62 #define DEFAULT_TSO_CSUM_ENABLE (true)
63 #define DEFAULT_VLAN_FILTER_ENABLE (true)
64 #define TX_OVERHEAD (8)
67 #define LAN78XX_USB_VENDOR_ID (0x0424)
68 #define LAN7800_USB_PRODUCT_ID (0x7800)
69 #define LAN7850_USB_PRODUCT_ID (0x7850)
70 #define LAN7801_USB_PRODUCT_ID (0x7801)
71 #define LAN78XX_EEPROM_MAGIC (0x78A5)
72 #define LAN78XX_OTP_MAGIC (0x78F3)
77 #define EEPROM_INDICATOR (0xA5)
78 #define EEPROM_MAC_OFFSET (0x01)
79 #define MAX_EEPROM_SIZE 512
80 #define OTP_INDICATOR_1 (0xF3)
81 #define OTP_INDICATOR_2 (0xF7)
83 #define WAKE_ALL (WAKE_PHY | WAKE_UCAST | \
84 WAKE_MCAST | WAKE_BCAST | \
85 WAKE_ARP | WAKE_MAGIC)
87 /* USB related defines */
88 #define BULK_IN_PIPE 1
89 #define BULK_OUT_PIPE 2
91 /* default autosuspend delay (mSec)*/
92 #define DEFAULT_AUTOSUSPEND_DELAY (10 * 1000)
94 /* statistic update interval (mSec) */
95 #define STAT_UPDATE_TIMER (1 * 1000)
97 /* defines interrupts from interrupt EP */
98 #define MAX_INT_EP (32)
99 #define INT_EP_INTEP (31)
100 #define INT_EP_OTP_WR_DONE (28)
101 #define INT_EP_EEE_TX_LPI_START (26)
102 #define INT_EP_EEE_TX_LPI_STOP (25)
103 #define INT_EP_EEE_RX_LPI (24)
104 #define INT_EP_MAC_RESET_TIMEOUT (23)
105 #define INT_EP_RDFO (22)
106 #define INT_EP_TXE (21)
107 #define INT_EP_USB_STATUS (20)
108 #define INT_EP_TX_DIS (19)
109 #define INT_EP_RX_DIS (18)
110 #define INT_EP_PHY (17)
111 #define INT_EP_DP (16)
112 #define INT_EP_MAC_ERR (15)
113 #define INT_EP_TDFU (14)
114 #define INT_EP_TDFO (13)
115 #define INT_EP_UTX (12)
116 #define INT_EP_GPIO_11 (11)
117 #define INT_EP_GPIO_10 (10)
118 #define INT_EP_GPIO_9 (9)
119 #define INT_EP_GPIO_8 (8)
120 #define INT_EP_GPIO_7 (7)
121 #define INT_EP_GPIO_6 (6)
122 #define INT_EP_GPIO_5 (5)
123 #define INT_EP_GPIO_4 (4)
124 #define INT_EP_GPIO_3 (3)
125 #define INT_EP_GPIO_2 (2)
126 #define INT_EP_GPIO_1 (1)
127 #define INT_EP_GPIO_0 (0)
129 static const char lan78xx_gstrings
[][ETH_GSTRING_LEN
] = {
131 "RX Alignment Errors",
132 "Rx Fragment Errors",
134 "RX Undersize Frame Errors",
135 "RX Oversize Frame Errors",
137 "RX Unicast Byte Count",
138 "RX Broadcast Byte Count",
139 "RX Multicast Byte Count",
141 "RX Broadcast Frames",
142 "RX Multicast Frames",
145 "RX 65 - 127 Byte Frames",
146 "RX 128 - 255 Byte Frames",
147 "RX 256 - 511 Bytes Frames",
148 "RX 512 - 1023 Byte Frames",
149 "RX 1024 - 1518 Byte Frames",
150 "RX Greater 1518 Byte Frames",
151 "EEE RX LPI Transitions",
154 "TX Excess Deferral Errors",
157 "TX Single Collisions",
158 "TX Multiple Collisions",
159 "TX Excessive Collision",
160 "TX Late Collisions",
161 "TX Unicast Byte Count",
162 "TX Broadcast Byte Count",
163 "TX Multicast Byte Count",
165 "TX Broadcast Frames",
166 "TX Multicast Frames",
169 "TX 65 - 127 Byte Frames",
170 "TX 128 - 255 Byte Frames",
171 "TX 256 - 511 Bytes Frames",
172 "TX 512 - 1023 Byte Frames",
173 "TX 1024 - 1518 Byte Frames",
174 "TX Greater 1518 Byte Frames",
175 "EEE TX LPI Transitions",
179 struct lan78xx_statstage
{
181 u32 rx_alignment_errors
;
182 u32 rx_fragment_errors
;
183 u32 rx_jabber_errors
;
184 u32 rx_undersize_frame_errors
;
185 u32 rx_oversize_frame_errors
;
186 u32 rx_dropped_frames
;
187 u32 rx_unicast_byte_count
;
188 u32 rx_broadcast_byte_count
;
189 u32 rx_multicast_byte_count
;
190 u32 rx_unicast_frames
;
191 u32 rx_broadcast_frames
;
192 u32 rx_multicast_frames
;
194 u32 rx_64_byte_frames
;
195 u32 rx_65_127_byte_frames
;
196 u32 rx_128_255_byte_frames
;
197 u32 rx_256_511_bytes_frames
;
198 u32 rx_512_1023_byte_frames
;
199 u32 rx_1024_1518_byte_frames
;
200 u32 rx_greater_1518_byte_frames
;
201 u32 eee_rx_lpi_transitions
;
204 u32 tx_excess_deferral_errors
;
205 u32 tx_carrier_errors
;
206 u32 tx_bad_byte_count
;
207 u32 tx_single_collisions
;
208 u32 tx_multiple_collisions
;
209 u32 tx_excessive_collision
;
210 u32 tx_late_collisions
;
211 u32 tx_unicast_byte_count
;
212 u32 tx_broadcast_byte_count
;
213 u32 tx_multicast_byte_count
;
214 u32 tx_unicast_frames
;
215 u32 tx_broadcast_frames
;
216 u32 tx_multicast_frames
;
218 u32 tx_64_byte_frames
;
219 u32 tx_65_127_byte_frames
;
220 u32 tx_128_255_byte_frames
;
221 u32 tx_256_511_bytes_frames
;
222 u32 tx_512_1023_byte_frames
;
223 u32 tx_1024_1518_byte_frames
;
224 u32 tx_greater_1518_byte_frames
;
225 u32 eee_tx_lpi_transitions
;
229 struct lan78xx_statstage64
{
231 u64 rx_alignment_errors
;
232 u64 rx_fragment_errors
;
233 u64 rx_jabber_errors
;
234 u64 rx_undersize_frame_errors
;
235 u64 rx_oversize_frame_errors
;
236 u64 rx_dropped_frames
;
237 u64 rx_unicast_byte_count
;
238 u64 rx_broadcast_byte_count
;
239 u64 rx_multicast_byte_count
;
240 u64 rx_unicast_frames
;
241 u64 rx_broadcast_frames
;
242 u64 rx_multicast_frames
;
244 u64 rx_64_byte_frames
;
245 u64 rx_65_127_byte_frames
;
246 u64 rx_128_255_byte_frames
;
247 u64 rx_256_511_bytes_frames
;
248 u64 rx_512_1023_byte_frames
;
249 u64 rx_1024_1518_byte_frames
;
250 u64 rx_greater_1518_byte_frames
;
251 u64 eee_rx_lpi_transitions
;
254 u64 tx_excess_deferral_errors
;
255 u64 tx_carrier_errors
;
256 u64 tx_bad_byte_count
;
257 u64 tx_single_collisions
;
258 u64 tx_multiple_collisions
;
259 u64 tx_excessive_collision
;
260 u64 tx_late_collisions
;
261 u64 tx_unicast_byte_count
;
262 u64 tx_broadcast_byte_count
;
263 u64 tx_multicast_byte_count
;
264 u64 tx_unicast_frames
;
265 u64 tx_broadcast_frames
;
266 u64 tx_multicast_frames
;
268 u64 tx_64_byte_frames
;
269 u64 tx_65_127_byte_frames
;
270 u64 tx_128_255_byte_frames
;
271 u64 tx_256_511_bytes_frames
;
272 u64 tx_512_1023_byte_frames
;
273 u64 tx_1024_1518_byte_frames
;
274 u64 tx_greater_1518_byte_frames
;
275 u64 eee_tx_lpi_transitions
;
281 struct lan78xx_priv
{
282 struct lan78xx_net
*dev
;
284 u32 mchash_table
[DP_SEL_VHF_HASH_LEN
]; /* multicat hash table */
285 u32 pfilter_table
[NUM_OF_MAF
][2]; /* perfect filter table */
286 u32 vlan_table
[DP_SEL_VHF_VLAN_LEN
];
287 struct mutex dataport_mutex
; /* for dataport access */
288 spinlock_t rfe_ctl_lock
; /* for rfe register access */
289 struct work_struct set_multicast
;
290 struct work_struct set_vlan
;
304 struct skb_data
{ /* skb->cb is one of these */
306 struct lan78xx_net
*dev
;
307 enum skb_state state
;
313 struct usb_ctrlrequest req
;
314 struct lan78xx_net
*dev
;
317 #define EVENT_TX_HALT 0
318 #define EVENT_RX_HALT 1
319 #define EVENT_RX_MEMORY 2
320 #define EVENT_STS_SPLIT 3
321 #define EVENT_LINK_RESET 4
322 #define EVENT_RX_PAUSED 5
323 #define EVENT_DEV_WAKING 6
324 #define EVENT_DEV_ASLEEP 7
325 #define EVENT_DEV_OPEN 8
326 #define EVENT_STAT_UPDATE 9
329 struct mutex access_lock
; /* for stats access */
330 struct lan78xx_statstage saved
;
331 struct lan78xx_statstage rollover_count
;
332 struct lan78xx_statstage rollover_max
;
333 struct lan78xx_statstage64 curr_stat
;
336 struct irq_domain_data
{
337 struct irq_domain
*irqdomain
;
339 struct irq_chip
*irqchip
;
340 irq_flow_handler_t irq_handler
;
342 struct mutex irq_lock
; /* for irq bus access */
346 struct net_device
*net
;
347 struct usb_device
*udev
;
348 struct usb_interface
*intf
;
353 struct sk_buff_head rxq
;
354 struct sk_buff_head txq
;
355 struct sk_buff_head done
;
356 struct sk_buff_head rxq_pause
;
357 struct sk_buff_head txq_pend
;
359 struct tasklet_struct bh
;
360 struct delayed_work wq
;
362 struct usb_host_endpoint
*ep_blkin
;
363 struct usb_host_endpoint
*ep_blkout
;
364 struct usb_host_endpoint
*ep_intr
;
368 struct urb
*urb_intr
;
369 struct usb_anchor deferred
;
371 struct mutex phy_mutex
; /* for phy access */
372 unsigned pipe_in
, pipe_out
, pipe_intr
;
374 u32 hard_mtu
; /* count any extra framing */
375 size_t rx_urb_size
; /* size for rx urbs */
379 wait_queue_head_t
*wait
;
380 unsigned char suspend_count
;
383 struct timer_list delay
;
384 struct timer_list stat_monitor
;
386 unsigned long data
[5];
393 struct mii_bus
*mdiobus
;
394 phy_interface_t interface
;
397 u8 fc_request_control
;
400 struct statstage stats
;
402 struct irq_domain_data domain_data
;
405 /* define external phy id */
406 #define PHY_LAN8835 (0x0007C130)
407 #define PHY_KSZ9031RNX (0x00221620)
409 /* use ethtool to change the level for any given device */
410 static int msg_level
= -1;
411 module_param(msg_level
, int, 0);
412 MODULE_PARM_DESC(msg_level
, "Override default message level");
414 static int lan78xx_read_reg(struct lan78xx_net
*dev
, u32 index
, u32
*data
)
416 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
422 ret
= usb_control_msg(dev
->udev
, usb_rcvctrlpipe(dev
->udev
, 0),
423 USB_VENDOR_REQUEST_READ_REGISTER
,
424 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
425 0, index
, buf
, 4, USB_CTRL_GET_TIMEOUT
);
426 if (likely(ret
>= 0)) {
430 netdev_warn(dev
->net
,
431 "Failed to read register index 0x%08x. ret = %d",
440 static int lan78xx_write_reg(struct lan78xx_net
*dev
, u32 index
, u32 data
)
442 u32
*buf
= kmalloc(sizeof(u32
), GFP_KERNEL
);
451 ret
= usb_control_msg(dev
->udev
, usb_sndctrlpipe(dev
->udev
, 0),
452 USB_VENDOR_REQUEST_WRITE_REGISTER
,
453 USB_DIR_OUT
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
454 0, index
, buf
, 4, USB_CTRL_SET_TIMEOUT
);
455 if (unlikely(ret
< 0)) {
456 netdev_warn(dev
->net
,
457 "Failed to write register index 0x%08x. ret = %d",
466 static int lan78xx_read_stats(struct lan78xx_net
*dev
,
467 struct lan78xx_statstage
*data
)
471 struct lan78xx_statstage
*stats
;
475 stats
= kmalloc(sizeof(*stats
), GFP_KERNEL
);
479 ret
= usb_control_msg(dev
->udev
,
480 usb_rcvctrlpipe(dev
->udev
, 0),
481 USB_VENDOR_REQUEST_GET_STATS
,
482 USB_DIR_IN
| USB_TYPE_VENDOR
| USB_RECIP_DEVICE
,
487 USB_CTRL_SET_TIMEOUT
);
488 if (likely(ret
>= 0)) {
491 for (i
= 0; i
< sizeof(*stats
)/sizeof(u32
); i
++) {
492 le32_to_cpus(&src
[i
]);
496 netdev_warn(dev
->net
,
497 "Failed to read stat ret = 0x%x", ret
);
505 #define check_counter_rollover(struct1, dev_stats, member) { \
506 if (struct1->member < dev_stats.saved.member) \
507 dev_stats.rollover_count.member++; \
510 static void lan78xx_check_stat_rollover(struct lan78xx_net
*dev
,
511 struct lan78xx_statstage
*stats
)
513 check_counter_rollover(stats
, dev
->stats
, rx_fcs_errors
);
514 check_counter_rollover(stats
, dev
->stats
, rx_alignment_errors
);
515 check_counter_rollover(stats
, dev
->stats
, rx_fragment_errors
);
516 check_counter_rollover(stats
, dev
->stats
, rx_jabber_errors
);
517 check_counter_rollover(stats
, dev
->stats
, rx_undersize_frame_errors
);
518 check_counter_rollover(stats
, dev
->stats
, rx_oversize_frame_errors
);
519 check_counter_rollover(stats
, dev
->stats
, rx_dropped_frames
);
520 check_counter_rollover(stats
, dev
->stats
, rx_unicast_byte_count
);
521 check_counter_rollover(stats
, dev
->stats
, rx_broadcast_byte_count
);
522 check_counter_rollover(stats
, dev
->stats
, rx_multicast_byte_count
);
523 check_counter_rollover(stats
, dev
->stats
, rx_unicast_frames
);
524 check_counter_rollover(stats
, dev
->stats
, rx_broadcast_frames
);
525 check_counter_rollover(stats
, dev
->stats
, rx_multicast_frames
);
526 check_counter_rollover(stats
, dev
->stats
, rx_pause_frames
);
527 check_counter_rollover(stats
, dev
->stats
, rx_64_byte_frames
);
528 check_counter_rollover(stats
, dev
->stats
, rx_65_127_byte_frames
);
529 check_counter_rollover(stats
, dev
->stats
, rx_128_255_byte_frames
);
530 check_counter_rollover(stats
, dev
->stats
, rx_256_511_bytes_frames
);
531 check_counter_rollover(stats
, dev
->stats
, rx_512_1023_byte_frames
);
532 check_counter_rollover(stats
, dev
->stats
, rx_1024_1518_byte_frames
);
533 check_counter_rollover(stats
, dev
->stats
, rx_greater_1518_byte_frames
);
534 check_counter_rollover(stats
, dev
->stats
, eee_rx_lpi_transitions
);
535 check_counter_rollover(stats
, dev
->stats
, eee_rx_lpi_time
);
536 check_counter_rollover(stats
, dev
->stats
, tx_fcs_errors
);
537 check_counter_rollover(stats
, dev
->stats
, tx_excess_deferral_errors
);
538 check_counter_rollover(stats
, dev
->stats
, tx_carrier_errors
);
539 check_counter_rollover(stats
, dev
->stats
, tx_bad_byte_count
);
540 check_counter_rollover(stats
, dev
->stats
, tx_single_collisions
);
541 check_counter_rollover(stats
, dev
->stats
, tx_multiple_collisions
);
542 check_counter_rollover(stats
, dev
->stats
, tx_excessive_collision
);
543 check_counter_rollover(stats
, dev
->stats
, tx_late_collisions
);
544 check_counter_rollover(stats
, dev
->stats
, tx_unicast_byte_count
);
545 check_counter_rollover(stats
, dev
->stats
, tx_broadcast_byte_count
);
546 check_counter_rollover(stats
, dev
->stats
, tx_multicast_byte_count
);
547 check_counter_rollover(stats
, dev
->stats
, tx_unicast_frames
);
548 check_counter_rollover(stats
, dev
->stats
, tx_broadcast_frames
);
549 check_counter_rollover(stats
, dev
->stats
, tx_multicast_frames
);
550 check_counter_rollover(stats
, dev
->stats
, tx_pause_frames
);
551 check_counter_rollover(stats
, dev
->stats
, tx_64_byte_frames
);
552 check_counter_rollover(stats
, dev
->stats
, tx_65_127_byte_frames
);
553 check_counter_rollover(stats
, dev
->stats
, tx_128_255_byte_frames
);
554 check_counter_rollover(stats
, dev
->stats
, tx_256_511_bytes_frames
);
555 check_counter_rollover(stats
, dev
->stats
, tx_512_1023_byte_frames
);
556 check_counter_rollover(stats
, dev
->stats
, tx_1024_1518_byte_frames
);
557 check_counter_rollover(stats
, dev
->stats
, tx_greater_1518_byte_frames
);
558 check_counter_rollover(stats
, dev
->stats
, eee_tx_lpi_transitions
);
559 check_counter_rollover(stats
, dev
->stats
, eee_tx_lpi_time
);
561 memcpy(&dev
->stats
.saved
, stats
, sizeof(struct lan78xx_statstage
));
564 static void lan78xx_update_stats(struct lan78xx_net
*dev
)
566 u32
*p
, *count
, *max
;
569 struct lan78xx_statstage lan78xx_stats
;
571 if (usb_autopm_get_interface(dev
->intf
) < 0)
574 p
= (u32
*)&lan78xx_stats
;
575 count
= (u32
*)&dev
->stats
.rollover_count
;
576 max
= (u32
*)&dev
->stats
.rollover_max
;
577 data
= (u64
*)&dev
->stats
.curr_stat
;
579 mutex_lock(&dev
->stats
.access_lock
);
581 if (lan78xx_read_stats(dev
, &lan78xx_stats
) > 0)
582 lan78xx_check_stat_rollover(dev
, &lan78xx_stats
);
584 for (i
= 0; i
< (sizeof(lan78xx_stats
) / (sizeof(u32
))); i
++)
585 data
[i
] = (u64
)p
[i
] + ((u64
)count
[i
] * ((u64
)max
[i
] + 1));
587 mutex_unlock(&dev
->stats
.access_lock
);
589 usb_autopm_put_interface(dev
->intf
);
592 /* Loop until the read is completed with timeout called with phy_mutex held */
593 static int lan78xx_phy_wait_not_busy(struct lan78xx_net
*dev
)
595 unsigned long start_time
= jiffies
;
600 ret
= lan78xx_read_reg(dev
, MII_ACC
, &val
);
601 if (unlikely(ret
< 0))
604 if (!(val
& MII_ACC_MII_BUSY_
))
606 } while (!time_after(jiffies
, start_time
+ HZ
));
611 static inline u32
mii_access(int id
, int index
, int read
)
615 ret
= ((u32
)id
<< MII_ACC_PHY_ADDR_SHIFT_
) & MII_ACC_PHY_ADDR_MASK_
;
616 ret
|= ((u32
)index
<< MII_ACC_MIIRINDA_SHIFT_
) & MII_ACC_MIIRINDA_MASK_
;
618 ret
|= MII_ACC_MII_READ_
;
620 ret
|= MII_ACC_MII_WRITE_
;
621 ret
|= MII_ACC_MII_BUSY_
;
626 static int lan78xx_wait_eeprom(struct lan78xx_net
*dev
)
628 unsigned long start_time
= jiffies
;
633 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
634 if (unlikely(ret
< 0))
637 if (!(val
& E2P_CMD_EPC_BUSY_
) ||
638 (val
& E2P_CMD_EPC_TIMEOUT_
))
640 usleep_range(40, 100);
641 } while (!time_after(jiffies
, start_time
+ HZ
));
643 if (val
& (E2P_CMD_EPC_TIMEOUT_
| E2P_CMD_EPC_BUSY_
)) {
644 netdev_warn(dev
->net
, "EEPROM read operation timeout");
651 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net
*dev
)
653 unsigned long start_time
= jiffies
;
658 ret
= lan78xx_read_reg(dev
, E2P_CMD
, &val
);
659 if (unlikely(ret
< 0))
662 if (!(val
& E2P_CMD_EPC_BUSY_
))
665 usleep_range(40, 100);
666 } while (!time_after(jiffies
, start_time
+ HZ
));
668 netdev_warn(dev
->net
, "EEPROM is busy");
672 static int lan78xx_read_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
673 u32 length
, u8
*data
)
680 /* depends on chip, some EEPROM pins are muxed with LED function.
681 * disable & restore LED function to access EEPROM.
683 ret
= lan78xx_read_reg(dev
, HW_CFG
, &val
);
685 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
) {
686 val
&= ~(HW_CFG_LED1_EN_
| HW_CFG_LED0_EN_
);
687 ret
= lan78xx_write_reg(dev
, HW_CFG
, val
);
690 retval
= lan78xx_eeprom_confirm_not_busy(dev
);
694 for (i
= 0; i
< length
; i
++) {
695 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_READ_
;
696 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
697 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
698 if (unlikely(ret
< 0)) {
703 retval
= lan78xx_wait_eeprom(dev
);
707 ret
= lan78xx_read_reg(dev
, E2P_DATA
, &val
);
708 if (unlikely(ret
< 0)) {
713 data
[i
] = val
& 0xFF;
719 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
)
720 ret
= lan78xx_write_reg(dev
, HW_CFG
, saved
);
725 static int lan78xx_read_eeprom(struct lan78xx_net
*dev
, u32 offset
,
726 u32 length
, u8
*data
)
731 ret
= lan78xx_read_raw_eeprom(dev
, 0, 1, &sig
);
732 if ((ret
== 0) && (sig
== EEPROM_INDICATOR
))
733 ret
= lan78xx_read_raw_eeprom(dev
, offset
, length
, data
);
740 static int lan78xx_write_raw_eeprom(struct lan78xx_net
*dev
, u32 offset
,
741 u32 length
, u8
*data
)
748 /* depends on chip, some EEPROM pins are muxed with LED function.
749 * disable & restore LED function to access EEPROM.
751 ret
= lan78xx_read_reg(dev
, HW_CFG
, &val
);
753 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
) {
754 val
&= ~(HW_CFG_LED1_EN_
| HW_CFG_LED0_EN_
);
755 ret
= lan78xx_write_reg(dev
, HW_CFG
, val
);
758 retval
= lan78xx_eeprom_confirm_not_busy(dev
);
762 /* Issue write/erase enable command */
763 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_EWEN_
;
764 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
765 if (unlikely(ret
< 0)) {
770 retval
= lan78xx_wait_eeprom(dev
);
774 for (i
= 0; i
< length
; i
++) {
775 /* Fill data register */
777 ret
= lan78xx_write_reg(dev
, E2P_DATA
, val
);
783 /* Send "write" command */
784 val
= E2P_CMD_EPC_BUSY_
| E2P_CMD_EPC_CMD_WRITE_
;
785 val
|= (offset
& E2P_CMD_EPC_ADDR_MASK_
);
786 ret
= lan78xx_write_reg(dev
, E2P_CMD
, val
);
792 retval
= lan78xx_wait_eeprom(dev
);
801 if (dev
->chipid
== ID_REV_CHIP_ID_7800_
)
802 ret
= lan78xx_write_reg(dev
, HW_CFG
, saved
);
807 static int lan78xx_read_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
808 u32 length
, u8
*data
)
813 unsigned long timeout
;
815 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
817 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
818 /* clear it and wait to be cleared */
819 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
821 timeout
= jiffies
+ HZ
;
824 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
825 if (time_after(jiffies
, timeout
)) {
826 netdev_warn(dev
->net
,
827 "timeout on OTP_PWR_DN");
830 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
833 for (i
= 0; i
< length
; i
++) {
834 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
835 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
836 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
837 ((offset
+ i
) & OTP_ADDR2_10_3
));
839 ret
= lan78xx_write_reg(dev
, OTP_FUNC_CMD
, OTP_FUNC_CMD_READ_
);
840 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
842 timeout
= jiffies
+ HZ
;
845 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
846 if (time_after(jiffies
, timeout
)) {
847 netdev_warn(dev
->net
,
848 "timeout on OTP_STATUS");
851 } while (buf
& OTP_STATUS_BUSY_
);
853 ret
= lan78xx_read_reg(dev
, OTP_RD_DATA
, &buf
);
855 data
[i
] = (u8
)(buf
& 0xFF);
861 static int lan78xx_write_raw_otp(struct lan78xx_net
*dev
, u32 offset
,
862 u32 length
, u8
*data
)
867 unsigned long timeout
;
869 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
871 if (buf
& OTP_PWR_DN_PWRDN_N_
) {
872 /* clear it and wait to be cleared */
873 ret
= lan78xx_write_reg(dev
, OTP_PWR_DN
, 0);
875 timeout
= jiffies
+ HZ
;
878 ret
= lan78xx_read_reg(dev
, OTP_PWR_DN
, &buf
);
879 if (time_after(jiffies
, timeout
)) {
880 netdev_warn(dev
->net
,
881 "timeout on OTP_PWR_DN completion");
884 } while (buf
& OTP_PWR_DN_PWRDN_N_
);
887 /* set to BYTE program mode */
888 ret
= lan78xx_write_reg(dev
, OTP_PRGM_MODE
, OTP_PRGM_MODE_BYTE_
);
890 for (i
= 0; i
< length
; i
++) {
891 ret
= lan78xx_write_reg(dev
, OTP_ADDR1
,
892 ((offset
+ i
) >> 8) & OTP_ADDR1_15_11
);
893 ret
= lan78xx_write_reg(dev
, OTP_ADDR2
,
894 ((offset
+ i
) & OTP_ADDR2_10_3
));
895 ret
= lan78xx_write_reg(dev
, OTP_PRGM_DATA
, data
[i
]);
896 ret
= lan78xx_write_reg(dev
, OTP_TST_CMD
, OTP_TST_CMD_PRGVRFY_
);
897 ret
= lan78xx_write_reg(dev
, OTP_CMD_GO
, OTP_CMD_GO_GO_
);
899 timeout
= jiffies
+ HZ
;
902 ret
= lan78xx_read_reg(dev
, OTP_STATUS
, &buf
);
903 if (time_after(jiffies
, timeout
)) {
904 netdev_warn(dev
->net
,
905 "Timeout on OTP_STATUS completion");
908 } while (buf
& OTP_STATUS_BUSY_
);
914 static int lan78xx_read_otp(struct lan78xx_net
*dev
, u32 offset
,
915 u32 length
, u8
*data
)
920 ret
= lan78xx_read_raw_otp(dev
, 0, 1, &sig
);
923 if (sig
== OTP_INDICATOR_1
)
925 else if (sig
== OTP_INDICATOR_2
)
929 ret
= lan78xx_read_raw_otp(dev
, offset
, length
, data
);
935 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net
*dev
)
939 for (i
= 0; i
< 100; i
++) {
942 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
943 if (unlikely(ret
< 0))
946 if (dp_sel
& DP_SEL_DPRDY_
)
949 usleep_range(40, 100);
952 netdev_warn(dev
->net
, "lan78xx_dataport_wait_not_busy timed out");
957 static int lan78xx_dataport_write(struct lan78xx_net
*dev
, u32 ram_select
,
958 u32 addr
, u32 length
, u32
*buf
)
960 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
964 if (usb_autopm_get_interface(dev
->intf
) < 0)
967 mutex_lock(&pdata
->dataport_mutex
);
969 ret
= lan78xx_dataport_wait_not_busy(dev
);
973 ret
= lan78xx_read_reg(dev
, DP_SEL
, &dp_sel
);
975 dp_sel
&= ~DP_SEL_RSEL_MASK_
;
976 dp_sel
|= ram_select
;
977 ret
= lan78xx_write_reg(dev
, DP_SEL
, dp_sel
);
979 for (i
= 0; i
< length
; i
++) {
980 ret
= lan78xx_write_reg(dev
, DP_ADDR
, addr
+ i
);
982 ret
= lan78xx_write_reg(dev
, DP_DATA
, buf
[i
]);
984 ret
= lan78xx_write_reg(dev
, DP_CMD
, DP_CMD_WRITE_
);
986 ret
= lan78xx_dataport_wait_not_busy(dev
);
992 mutex_unlock(&pdata
->dataport_mutex
);
993 usb_autopm_put_interface(dev
->intf
);
998 static void lan78xx_set_addr_filter(struct lan78xx_priv
*pdata
,
999 int index
, u8 addr
[ETH_ALEN
])
1003 if ((pdata
) && (index
> 0) && (index
< NUM_OF_MAF
)) {
1005 temp
= addr
[2] | (temp
<< 8);
1006 temp
= addr
[1] | (temp
<< 8);
1007 temp
= addr
[0] | (temp
<< 8);
1008 pdata
->pfilter_table
[index
][1] = temp
;
1010 temp
= addr
[4] | (temp
<< 8);
1011 temp
|= MAF_HI_VALID_
| MAF_HI_TYPE_DST_
;
1012 pdata
->pfilter_table
[index
][0] = temp
;
1016 /* returns hash bit number for given MAC address */
1017 static inline u32
lan78xx_hash(char addr
[ETH_ALEN
])
1019 return (ether_crc(ETH_ALEN
, addr
) >> 23) & 0x1ff;
1022 static void lan78xx_deferred_multicast_write(struct work_struct
*param
)
1024 struct lan78xx_priv
*pdata
=
1025 container_of(param
, struct lan78xx_priv
, set_multicast
);
1026 struct lan78xx_net
*dev
= pdata
->dev
;
1030 netif_dbg(dev
, drv
, dev
->net
, "deferred multicast write 0x%08x\n",
1033 lan78xx_dataport_write(dev
, DP_SEL_RSEL_VLAN_DA_
, DP_SEL_VHF_VLAN_LEN
,
1034 DP_SEL_VHF_HASH_LEN
, pdata
->mchash_table
);
1036 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
1037 ret
= lan78xx_write_reg(dev
, MAF_HI(i
), 0);
1038 ret
= lan78xx_write_reg(dev
, MAF_LO(i
),
1039 pdata
->pfilter_table
[i
][1]);
1040 ret
= lan78xx_write_reg(dev
, MAF_HI(i
),
1041 pdata
->pfilter_table
[i
][0]);
1044 ret
= lan78xx_write_reg(dev
, RFE_CTL
, pdata
->rfe_ctl
);
1047 static void lan78xx_set_multicast(struct net_device
*netdev
)
1049 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1050 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
1051 unsigned long flags
;
1054 spin_lock_irqsave(&pdata
->rfe_ctl_lock
, flags
);
1056 pdata
->rfe_ctl
&= ~(RFE_CTL_UCAST_EN_
| RFE_CTL_MCAST_EN_
|
1057 RFE_CTL_DA_PERFECT_
| RFE_CTL_MCAST_HASH_
);
1059 for (i
= 0; i
< DP_SEL_VHF_HASH_LEN
; i
++)
1060 pdata
->mchash_table
[i
] = 0;
1061 /* pfilter_table[0] has own HW address */
1062 for (i
= 1; i
< NUM_OF_MAF
; i
++) {
1063 pdata
->pfilter_table
[i
][0] =
1064 pdata
->pfilter_table
[i
][1] = 0;
1067 pdata
->rfe_ctl
|= RFE_CTL_BCAST_EN_
;
1069 if (dev
->net
->flags
& IFF_PROMISC
) {
1070 netif_dbg(dev
, drv
, dev
->net
, "promiscuous mode enabled");
1071 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
| RFE_CTL_UCAST_EN_
;
1073 if (dev
->net
->flags
& IFF_ALLMULTI
) {
1074 netif_dbg(dev
, drv
, dev
->net
,
1075 "receive all multicast enabled");
1076 pdata
->rfe_ctl
|= RFE_CTL_MCAST_EN_
;
1080 if (netdev_mc_count(dev
->net
)) {
1081 struct netdev_hw_addr
*ha
;
1084 netif_dbg(dev
, drv
, dev
->net
, "receive multicast hash filter");
1086 pdata
->rfe_ctl
|= RFE_CTL_DA_PERFECT_
;
1089 netdev_for_each_mc_addr(ha
, netdev
) {
1090 /* set first 32 into Perfect Filter */
1092 lan78xx_set_addr_filter(pdata
, i
, ha
->addr
);
1094 u32 bitnum
= lan78xx_hash(ha
->addr
);
1096 pdata
->mchash_table
[bitnum
/ 32] |=
1097 (1 << (bitnum
% 32));
1098 pdata
->rfe_ctl
|= RFE_CTL_MCAST_HASH_
;
1104 spin_unlock_irqrestore(&pdata
->rfe_ctl_lock
, flags
);
1106 /* defer register writes to a sleepable context */
1107 schedule_work(&pdata
->set_multicast
);
1110 static int lan78xx_update_flowcontrol(struct lan78xx_net
*dev
, u8 duplex
,
1111 u16 lcladv
, u16 rmtadv
)
1113 u32 flow
= 0, fct_flow
= 0;
1117 if (dev
->fc_autoneg
)
1118 cap
= mii_resolve_flowctrl_fdx(lcladv
, rmtadv
);
1120 cap
= dev
->fc_request_control
;
1122 if (cap
& FLOW_CTRL_TX
)
1123 flow
|= (FLOW_CR_TX_FCEN_
| 0xFFFF);
1125 if (cap
& FLOW_CTRL_RX
)
1126 flow
|= FLOW_CR_RX_FCEN_
;
1128 if (dev
->udev
->speed
== USB_SPEED_SUPER
)
1130 else if (dev
->udev
->speed
== USB_SPEED_HIGH
)
1133 netif_dbg(dev
, link
, dev
->net
, "rx pause %s, tx pause %s",
1134 (cap
& FLOW_CTRL_RX
? "enabled" : "disabled"),
1135 (cap
& FLOW_CTRL_TX
? "enabled" : "disabled"));
1137 ret
= lan78xx_write_reg(dev
, FCT_FLOW
, fct_flow
);
1139 /* threshold value should be set before enabling flow */
1140 ret
= lan78xx_write_reg(dev
, FLOW
, flow
);
1145 static int lan78xx_link_reset(struct lan78xx_net
*dev
)
1147 struct phy_device
*phydev
= dev
->net
->phydev
;
1148 struct ethtool_link_ksettings ecmd
;
1149 int ladv
, radv
, ret
;
1152 /* clear LAN78xx interrupt status */
1153 ret
= lan78xx_write_reg(dev
, INT_STS
, INT_STS_PHY_INT_
);
1154 if (unlikely(ret
< 0))
1157 phy_read_status(phydev
);
1159 if (!phydev
->link
&& dev
->link_on
) {
1160 dev
->link_on
= false;
1163 ret
= lan78xx_read_reg(dev
, MAC_CR
, &buf
);
1164 if (unlikely(ret
< 0))
1167 ret
= lan78xx_write_reg(dev
, MAC_CR
, buf
);
1168 if (unlikely(ret
< 0))
1171 del_timer(&dev
->stat_monitor
);
1172 } else if (phydev
->link
&& !dev
->link_on
) {
1173 dev
->link_on
= true;
1175 phy_ethtool_ksettings_get(phydev
, &ecmd
);
1177 if (dev
->udev
->speed
== USB_SPEED_SUPER
) {
1178 if (ecmd
.base
.speed
== 1000) {
1180 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1181 buf
&= ~USB_CFG1_DEV_U2_INIT_EN_
;
1182 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
1184 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1185 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
1186 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
1188 /* enable U1 & U2 */
1189 ret
= lan78xx_read_reg(dev
, USB_CFG1
, &buf
);
1190 buf
|= USB_CFG1_DEV_U2_INIT_EN_
;
1191 buf
|= USB_CFG1_DEV_U1_INIT_EN_
;
1192 ret
= lan78xx_write_reg(dev
, USB_CFG1
, buf
);
1196 ladv
= phy_read(phydev
, MII_ADVERTISE
);
1200 radv
= phy_read(phydev
, MII_LPA
);
1204 netif_dbg(dev
, link
, dev
->net
,
1205 "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
1206 ecmd
.base
.speed
, ecmd
.base
.duplex
, ladv
, radv
);
1208 ret
= lan78xx_update_flowcontrol(dev
, ecmd
.base
.duplex
, ladv
,
1211 if (!timer_pending(&dev
->stat_monitor
)) {
1213 mod_timer(&dev
->stat_monitor
,
1214 jiffies
+ STAT_UPDATE_TIMER
);
1221 /* some work can't be done in tasklets, so we use keventd
1223 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
1224 * but tasklet_schedule() doesn't. hope the failure is rare.
1226 static void lan78xx_defer_kevent(struct lan78xx_net
*dev
, int work
)
1228 set_bit(work
, &dev
->flags
);
1229 if (!schedule_delayed_work(&dev
->wq
, 0))
1230 netdev_err(dev
->net
, "kevent %d may have been dropped\n", work
);
1233 static void lan78xx_status(struct lan78xx_net
*dev
, struct urb
*urb
)
1237 if (urb
->actual_length
!= 4) {
1238 netdev_warn(dev
->net
,
1239 "unexpected urb length %d", urb
->actual_length
);
1243 memcpy(&intdata
, urb
->transfer_buffer
, 4);
1244 le32_to_cpus(&intdata
);
1246 if (intdata
& INT_ENP_PHY_INT
) {
1247 netif_dbg(dev
, link
, dev
->net
, "PHY INTR: 0x%08x\n", intdata
);
1248 lan78xx_defer_kevent(dev
, EVENT_LINK_RESET
);
1250 if (dev
->domain_data
.phyirq
> 0)
1251 generic_handle_irq(dev
->domain_data
.phyirq
);
1253 netdev_warn(dev
->net
,
1254 "unexpected interrupt: 0x%08x\n", intdata
);
1257 static int lan78xx_ethtool_get_eeprom_len(struct net_device
*netdev
)
1259 return MAX_EEPROM_SIZE
;
1262 static int lan78xx_ethtool_get_eeprom(struct net_device
*netdev
,
1263 struct ethtool_eeprom
*ee
, u8
*data
)
1265 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1267 ee
->magic
= LAN78XX_EEPROM_MAGIC
;
1269 return lan78xx_read_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1272 static int lan78xx_ethtool_set_eeprom(struct net_device
*netdev
,
1273 struct ethtool_eeprom
*ee
, u8
*data
)
1275 struct lan78xx_net
*dev
= netdev_priv(netdev
);
1277 /* Allow entire eeprom update only */
1278 if ((ee
->magic
== LAN78XX_EEPROM_MAGIC
) &&
1279 (ee
->offset
== 0) &&
1281 (data
[0] == EEPROM_INDICATOR
))
1282 return lan78xx_write_raw_eeprom(dev
, ee
->offset
, ee
->len
, data
);
1283 else if ((ee
->magic
== LAN78XX_OTP_MAGIC
) &&
1284 (ee
->offset
== 0) &&
1286 (data
[0] == OTP_INDICATOR_1
))
1287 return lan78xx_write_raw_otp(dev
, ee
->offset
, ee
->len
, data
);
/* ethtool .get_strings: copy the statistics name table for ETH_SS_STATS;
 * other string sets are not supported and leave @data untouched.
 */
static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}
/* ethtool .get_sset_count: number of stat strings for ETH_SS_STATS,
 * -EOPNOTSUPP for any other string set.
 */
static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}
/* ethtool .get_ethtool_stats: refresh the hardware counters, then copy the
 * current snapshot under the stats mutex so readers never see a torn
 * update from the stat-monitor path.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
/* ethtool .get_wol: report Wake-on-LAN capability/settings.  Support is
 * only advertised when remote wakeup is enabled in USB_CFG0; otherwise (or
 * on a register read failure) zeroed capabilities are reported.
 */
static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}
/* ethtool .set_wol: record the requested wake events in driver state,
 * enable/disable USB wakeup accordingly and pass the request down to the
 * PHY.  The recorded mask is applied to hardware on suspend.
 */
static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* rebuild the mask from scratch so cleared options are dropped */
	pdata->wol = 0;
	if (wol->wolopts & WAKE_UCAST)
		pdata->wol |= WAKE_UCAST;
	if (wol->wolopts & WAKE_MCAST)
		pdata->wol |= WAKE_MCAST;
	if (wol->wolopts & WAKE_BCAST)
		pdata->wol |= WAKE_BCAST;
	if (wol->wolopts & WAKE_MAGIC)
		pdata->wol |= WAKE_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		pdata->wol |= WAKE_PHY;
	if (wol->wolopts & WAKE_ARP)
		pdata->wol |= WAKE_ARP;

	device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);

	phy_ethtool_set_wol(netdev->phydev, wol);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
/* ethtool .get_eee: merge PHY-level EEE state with the MAC's EEE enable
 * bit.  eee_active is derived from the advertised/link-partner overlap;
 * the LPI request delay register shares the same microsecond unit as
 * tx_lpi_timer so it is reported directly.
 */
static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_get_eee(phydev, edata);
	if (ret < 0)
		goto exit;

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	if (buf & MAC_CR_EEE_EN_) {
		edata->eee_enabled = true;
		edata->eee_active = !!(edata->advertised &
				       edata->lp_advertised);
		edata->tx_lpi_enabled = true;
		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
		edata->tx_lpi_timer = buf;
	} else {
		edata->eee_enabled = false;
		edata->eee_active = false;
		edata->tx_lpi_enabled = false;
		edata->tx_lpi_timer = 0;
	}

	ret = 0;
exit:
	usb_autopm_put_interface(dev->intf);

	return ret;
}
/* ethtool .set_eee: enable/disable EEE in the MAC and, when enabling,
 * forward the advertisement to the PHY and program the LPI request delay
 * (same microsecond unit as tx_lpi_timer).
 */
static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;
	u32 buf;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	if (edata->eee_enabled) {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf |= MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);

		phy_ethtool_set_eee(net->phydev, edata);

		buf = (u32)edata->tx_lpi_timer;
		ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
	} else {
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		buf &= ~MAC_CR_EEE_EN_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
	}

	usb_autopm_put_interface(dev->intf);

	return 0;
}
1449 static u32
lan78xx_get_link(struct net_device
*net
)
1451 phy_read_status(net
->phydev
);
1453 return net
->phydev
->link
;
1456 static void lan78xx_get_drvinfo(struct net_device
*net
,
1457 struct ethtool_drvinfo
*info
)
1459 struct lan78xx_net
*dev
= netdev_priv(net
);
1461 strncpy(info
->driver
, DRIVER_NAME
, sizeof(info
->driver
));
1462 strncpy(info
->version
, DRIVER_VERSION
, sizeof(info
->version
));
1463 usb_make_path(dev
->udev
, info
->bus_info
, sizeof(info
->bus_info
));
/* ethtool .get_msglevel: return the current netif message-enable mask. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
/* ethtool .set_msglevel: store the new netif message-enable mask. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
/* ethtool .get_link_ksettings: delegate to phylib while holding a USB
 * autopm reference so the device is resumed for the query.
 */
static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	ret = phy_ethtool_ksettings_get(phydev, cmd);

	usb_autopm_put_interface(dev->intf);

	return ret;
}
/* ethtool .set_link_ksettings: apply speed/duplex/autoneg via phylib.
 * When autoneg is off, briefly force the link down (loopback bit) so the
 * partner renegotiates against the newly forced mode.
 */
static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	int ret = 0;
	int temp;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	/* change speed & duplex */
	ret = phy_ethtool_ksettings_set(phydev, cmd);

	if (!cmd->base.autoneg) {
		/* force link down */
		temp = phy_read(phydev, MII_BMCR);
		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
		mdelay(1);	/* NOTE(review): short settle delay — confirm */
		phy_write(phydev, MII_BMCR, temp);
	}

	usb_autopm_put_interface(dev->intf);

	return ret;
}
/* ethtool .get_pauseparam: report pause autoneg and the RX/TX flow-control
 * request mask kept in driver state.
 */
static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	pause->autoneg = dev->fc_autoneg;

	if (dev->fc_request_control & FLOW_CTRL_TX)
		pause->tx_pause = 1;

	if (dev->fc_request_control & FLOW_CTRL_RX)
		pause->rx_pause = 1;
}
/* ethtool .set_pauseparam: record the requested RX/TX flow control.  Pause
 * autoneg requires link autoneg to be on; when link autoneg is active the
 * pause bits are folded into the PHY advertisement and renegotiated.
 */
static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct phy_device *phydev = net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ret;

	phy_ethtool_ksettings_get(phydev, &ecmd);

	/* pause autoneg is meaningless without link autoneg */
	if (pause->autoneg && !ecmd.base.autoneg) {
		ret = -EINVAL;
		goto exit;
	}

	dev->fc_request_control = 0;
	if (pause->rx_pause)
		dev->fc_request_control |= FLOW_CTRL_RX;

	if (pause->tx_pause)
		dev->fc_request_control |= FLOW_CTRL_TX;

	if (ecmd.base.autoneg) {
		u32 mii_adv;
		u32 advertising;

		ethtool_convert_link_mode_to_legacy_u32(
			&advertising, ecmd.link_modes.advertising);

		/* replace the pause bits with the newly requested ones */
		advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
		advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

		ethtool_convert_legacy_u32_to_link_mode(
			ecmd.link_modes.advertising, advertising);

		phy_ethtool_ksettings_set(phydev, &ecmd);
	}

	dev->fc_autoneg = pause->autoneg;

	ret = 0;
exit:
	return ret;
}
/* ethtool operations table wired into the net_device at registration. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
};
/* ndo_do_ioctl: forward MII ioctls to the PHY; only valid while the
 * interface is up (the PHY is connected in ndo_open).
 */
static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;

	return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
/* Establish the device MAC address: use what is already latched in
 * RX_ADDRL/RX_ADDRH if valid, otherwise try EEPROM then OTP, and finally
 * fall back to a random address.  The chosen address is written back to
 * the RX address and MAC-filter slot 0 registers and into net_device.
 */
static void lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	int ret;
	u8 addr[6];

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);

	/* registers hold the address little-endian: LO = bytes 0..3 */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		/* reading mac address from EEPROM or OTP */
		if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
					 addr) == 0) ||
		    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
				      addr) == 0)) {
			if (is_valid_ether_addr(addr)) {
				/* eeprom values are valid so use them */
				netif_dbg(dev, ifup, dev->net,
					  "MAC address read from EEPROM");
			} else {
				/* generate random MAC */
				random_ether_addr(addr);
				netif_dbg(dev, ifup, dev->net,
					  "MAC address set to random addr");
			}

			addr_lo = addr[0] | (addr[1] << 8) |
				  (addr[2] << 16) | (addr[3] << 24);
			addr_hi = addr[4] | (addr[5] << 8);

			ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
			ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		} else {
			/* generate random MAC */
			random_ether_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}
	}

	/* mirror the address into perfect-filter slot 0 and mark it valid */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);

	ether_addr_copy(dev->net->dev_addr, addr);
}
/* MDIO read and write wrappers for phylib */

/* mii_bus .read: perform an MII register read through the MAC's MII_ACC /
 * MII_DATA window, serialized by phy_mutex and wrapped in a USB autopm
 * reference.  Returns the 16-bit register value or a negative errno.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
/* mii_bus .write: write a 16-bit MII register via MII_DATA / MII_ACC,
 * serialized by phy_mutex and wrapped in a USB autopm reference.
 */
static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
				 u16 regval)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->phy_mutex);

	/* confirm MII not busy */
	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	val = (u32)regval;
	ret = lan78xx_write_reg(dev, MII_DATA, val);

	/* set the address, index & direction (write to PHY) */
	addr = mii_access(phy_id, idx, MII_WRITE);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);

	ret = lan78xx_phy_wait_not_busy(dev);
	if (ret < 0)
		goto done;

done:
	mutex_unlock(&dev->phy_mutex);
	usb_autopm_put_interface(dev->intf);
	return 0;
}
/* Allocate and register the driver's MDIO bus.  The PHY scan mask depends
 * on the chip: LAN7800/7850 expose only the internal PHY at address 1,
 * LAN7801 scans external PHY addresses 0-7.
 */
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";

	/* unique bus id derived from the USB topology */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	ret = mdiobus_register(dev->mdiobus);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
/* Unregister and free the MDIO bus created by lan78xx_mdio_init(). */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
/* phylib link-change callback.  Contains an errata workaround only; normal
 * link handling is done elsewhere via EVENT_LINK_RESET.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	struct phy_device *phydev = net->phydev;
	int ret, temp;

	/* At forced 100 F/H mode, chip may fail to set mode correctly
	 * when cable is switched between long(~50+m) and short one.
	 * As workaround, set to 10 before setting to 100
	 * at forced 100 F/H mode.
	 */
	if (!phydev->autoneg && (phydev->speed == 100)) {
		/* disable phy interrupt */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);

		temp = phy_read(phydev, MII_BMCR);
		temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
		phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
		temp |= BMCR_SPEED100;
		phy_write(phydev, MII_BMCR, temp); /* set to 100 later */

		/* clear pending interrupt generated while workaround */
		temp = phy_read(phydev, LAN88XX_INT_STS);

		/* enable phy interrupt back */
		temp = phy_read(phydev, LAN88XX_INT_MASK);
		temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
		ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
	}
}
/* irq_domain .map: bind a virtual IRQ to our chip data, irqchip and
 * flow handler, and mark it as not probeable.
 */
static int irq_map(struct irq_domain *d, unsigned int irq,
		   irq_hw_number_t hwirq)
{
	struct irq_domain_data *data = d->host_data;

	irq_set_chip_data(irq, data);
	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
	irq_set_noprobe(irq);

	return 0;
}
/* irq_domain .unmap: detach chip/handler and chip data from the IRQ. */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
/* map/unmap callbacks for the interrupt-endpoint IRQ domain */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
/* irq_chip .irq_mask: clear the enable bit in the cached mask; the
 * hardware register is synced later in irq_bus_sync_unlock (sleepable).
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
/* irq_chip .irq_unmask: set the enable bit in the cached mask; synced to
 * hardware in irq_bus_sync_unlock.
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
/* irq_chip .irq_bus_lock: serialize mask updates against the slow-bus
 * (USB) register sync done at unlock time.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
/* irq_chip .irq_bus_sync_unlock: push the cached enable mask to INT_EP_CTL
 * if it changed, then release the bus lock.
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are only two callbacks executed in non-atomic contex.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

	mutex_unlock(&data->irq_lock);
}
/* irq_chip for the device's interrupt-endpoint sources; mask/unmask only
 * touch a cached value, hardware sync happens in the bus_lock callbacks.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
/* Create a simple IRQ domain over the chip's interrupt-endpoint sources
 * and map the PHY interrupt.  The current INT_EP_CTL value seeds the
 * cached enable mask.  Returns 0 on success, -EINVAL on domain/mapping
 * failure.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct device_node *of_node;
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	of_node = dev->udev->dev.parent->of_node;

	mutex_init(&dev->domain_data.irq_lock);

	lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_add_simple(of_node, MAX_INT_EP, 0,
					  &chip_domain_ops, &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
/* Tear down the PHY IRQ mapping and the IRQ domain created in
 * lan78xx_setup_irq_domain(); idempotent via the NULL/0 resets.
 */
static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
{
	if (dev->domain_data.phyirq > 0) {
		irq_dispose_mapping(dev->domain_data.phyirq);

		if (dev->domain_data.irqdomain)
			irq_domain_remove(dev->domain_data.irqdomain);
	}
	dev->domain_data.phyirq = 0;
	dev->domain_data.irqdomain = NULL;
}
/* PHY fixup for the external LAN8835 (LAN7801 boards): route the shared
 * pin to IRQ_N mode, enable the MAC-side RGMII TXC delay and tune the TX
 * DLL, then record the RGMII-TXID interface mode.  Returns 1 so phylib
 * treats the fixup as applied.
 * NOTE(review): the read-modify-write bit values for register 0x8010 were
 * not fully visible — confirm the mask/set constants against the datasheet.
 */
static int lan8835_fixup(struct phy_device *phydev)
{
	int buf;
	int ret;
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* LED2/PME_N/IRQ_N/RGMII_ID pin to IRQ_N mode */
	buf = phy_read_mmd_indirect(phydev, 0x8010, 3);
	buf &= ~0x1800;
	buf |= 0x0800;
	phy_write_mmd_indirect(phydev, 0x8010, 3, buf);

	/* RGMII MAC TXC Delay Enable */
	ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
				MAC_RGMII_ID_TXC_DELAY_EN_);

	/* RGMII TX DLL Tune Adjust */
	ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);

	dev->interface = PHY_INTERFACE_MODE_RGMII_TXID;

	return 1;
}
/* PHY fixup for the external Micrel KSZ9031RNX (LAN7801 boards): program
 * RGMII pad skews via MMD and record the RGMII-RXID interface mode.
 * Returns 1 so phylib treats the fixup as applied.
 */
static int ksz9031rnx_fixup(struct phy_device *phydev)
{
	struct lan78xx_net *dev = netdev_priv(phydev->attached_dev);

	/* Micrel9301RNX PHY configuration */
	/* RGMII Control Signal Pad Skew */
	phy_write_mmd_indirect(phydev, 4, 2, 0x0077);
	/* RGMII RX Data Pad Skew */
	phy_write_mmd_indirect(phydev, 5, 2, 0x7777);
	/* RGMII RX Clock Pad Skew */
	phy_write_mmd_indirect(phydev, 8, 2, 0x1FF);

	dev->interface = PHY_INTERFACE_MODE_RGMII_RXID;

	return 1;
}
/* Find and connect the PHY.  LAN7800/7850 use the internal GMII PHY;
 * LAN7801 uses an external RGMII PHY for which board-specific fixups
 * (KSZ9031RNX, LAN8835) are registered.  Flow-control advertisement is
 * seeded for RX+TX pause and autoneg is (re)started.
 * Returns 0 on success or a negative errno.
 * NOTE(review): error-path details between the visible lines were
 * reconstructed — verify the goto/return structure against the original.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	int ret;
	u32 mii_adv;
	struct phy_device *phydev = dev->net->phydev;

	phydev = phy_find_first(dev->mdiobus);
	if (!phydev) {
		netdev_err(dev->net, "no PHY found\n");
		return -EIO;
	}

	if ((dev->chipid == ID_REV_CHIP_ID_7800_) ||
	    (dev->chipid == ID_REV_CHIP_ID_7850_)) {
		phydev->is_internal = true;
		dev->interface = PHY_INTERFACE_MODE_GMII;

	} else if (dev->chipid == ID_REV_CHIP_ID_7801_) {
		if (!phydev->drv) {
			netdev_err(dev->net, "no PHY driver found\n");
			return -EIO;
		}

		dev->interface = PHY_INTERFACE_MODE_RGMII;

		/* external PHY fixup for KSZ9031RNX */
		ret = phy_register_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0,
						 ksz9031rnx_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "fail to register fixup\n");
			return ret;
		}
		/* external PHY fixup for LAN8835 */
		ret = phy_register_fixup_for_uid(PHY_LAN8835, 0xfffffff0,
						 lan8835_fixup);
		if (ret < 0) {
			netdev_err(dev->net, "fail to register fixup\n");
			return ret;
		}
		/* add more external PHY fixup here if needed */

		phydev->is_internal = false;
	} else {
		netdev_err(dev->net, "unknown ID found\n");
		ret = -EIO;
		goto error;
	}

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = 0;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phydev->supported &= ~SUPPORTED_1000baseT_Half;

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	return 0;

error:
	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	return ret;
}
/* Program the MAC's maximum RX frame size (@size excludes the 4-byte FCS,
 * which is added here).  The receiver is temporarily disabled around the
 * update if it was running, per the register's update requirements.
 */
static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
{
	int ret = 0;
	u32 buf;
	bool rxenabled;

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);

	rxenabled = ((buf & MAC_RX_RXEN_) != 0);

	if (rxenabled) {
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	/* add 4 to size for FCS */
	buf &= ~MAC_RX_MAX_SIZE_MASK_;
	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);

	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	if (rxenabled) {
		buf |= MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);
	}

	return 0;
}
/* Asynchronously unlink every URB on queue @q that is not already being
 * unlinked.  The queue lock is dropped around usb_unlink_urb() (which may
 * complete synchronously and re-take the lock in the completion handler),
 * so the walk restarts from the head each iteration.  Returns the number
 * of URBs successfully submitted for unlink.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	unsigned long flags;
	struct sk_buff *skb;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the first entry not yet marked for unlink */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
/* ndo_change_mtu: update the MAC's max frame length and recompute the
 * URB sizing.  An MTU whose link-layer frame size is an exact multiple of
 * the endpoint max packet would require a trailing zero-length packet and
 * is rejected.  If the RX URB size tracked hard_mtu and grows, in-flight
 * RX URBs are unlinked so they are resubmitted at the new size.
 */
static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ll_mtu = new_mtu + netdev->hard_header_len;
	int old_hard_mtu = dev->hard_mtu;
	int old_rx_urb_size = dev->rx_urb_size;
	int ret;

	/* no second zero-length packet read wanted after mtu-sized packets */
	if ((ll_mtu % dev->maxpacket) == 0)
		return -EDOM;

	ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);

	netdev->mtu = new_mtu;

	dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
	if (dev->rx_urb_size == old_hard_mtu) {
		dev->rx_urb_size = dev->hard_mtu;
		if (dev->rx_urb_size > old_rx_urb_size) {
			if (netif_running(dev->net)) {
				unlink_urbs(dev, &dev->rxq);
				tasklet_schedule(&dev->bh);
			}
		}
	}

	return 0;
}
/* ndo_set_mac_address: validate and store a new MAC address, then program
 * the RX address registers.  Refused while the interface is running.
 */
static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct sockaddr *addr = p;
	u32 addr_lo, addr_hi;
	int ret;

	if (netif_running(netdev))
		return -EBUSY;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, addr->sa_data);

	/* registers take the address little-endian: LO = bytes 0..3 */
	addr_lo = netdev->dev_addr[0] |
		  netdev->dev_addr[1] << 8 |
		  netdev->dev_addr[2] << 16 |
		  netdev->dev_addr[3] << 24;
	addr_hi = netdev->dev_addr[4] |
		  netdev->dev_addr[5] << 8;

	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);

	return 0;
}
/* Enable or disable Rx checksum offload engine */
/* ndo_set_features: translate RXCSUM and VLAN-RX-filter feature bits into
 * the cached RFE_CTL value (under the spinlock) and write it to hardware.
 */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	return 0;
}
/* Workqueue handler: flush the in-memory VLAN filter bitmap to the
 * device's VLAN dataport (register access must sleep, hence the deferral
 * from the ndo VLAN callbacks).
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
/* ndo_vlan_rx_add_vid: set the bit for @vid in the 32-bit-per-word VLAN
 * filter table and defer the hardware update to a sleepable context.
 */
static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
				   __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;	/* word index: vid / 32 */
	vid_bit_index = vid & 0x1F;		/* bit index:  vid % 32 */

	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
/* ndo_vlan_rx_kill_vid: clear the bit for @vid in the VLAN filter table
 * and defer the hardware update to a sleepable context.
 */
static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
				    __be16 proto, u16 vid)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u16 vid_bit_index;
	u16 vid_dword_index;

	vid_dword_index = (vid >> 5) & 0x7F;	/* word index: vid / 32 */
	vid_bit_index = vid & 0x1F;		/* bit index:  vid % 32 */

	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_vlan);

	return 0;
}
/* Program the USB Latency Tolerance Messaging (LTM) registers.  If LTM is
 * enabled in USB_CFG1, a 24-byte parameter block is loaded from EEPROM or
 * OTP (header at offset 0x3F); otherwise the zeroed defaults are written.
 * NOTE(review): the raw-read offset/length arguments were reconstructed
 * (header gives a 24-byte block at temp[1]*2) — confirm against the
 * original source.
 */
static void lan78xx_init_ltm(struct lan78xx_net *dev)
{
	int ret;
	u32 buf;
	u32 regs[6] = { 0 };

	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
	if (buf & USB_CFG1_LTM_ENABLE_) {
		u8 temp[2];
		/* Get values from EEPROM first */
		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_eeprom(dev,
							      temp[1] * 2,
							      24,
							      (u8 *)regs);
				if (ret < 0)
					return;
			}
		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
			if (temp[0] == 24) {
				ret = lan78xx_read_raw_otp(dev,
							   temp[1] * 2,
							   24,
							   (u8 *)regs);
				if (ret < 0)
					return;
			}
		}
	}

	lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
	lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
	lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
	lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
	lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
	lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
}
/* Full device initialisation: lite-reset the chip, program the MAC
 * address, identify the chip, configure burst/bulk-in/FIFO parameters by
 * USB speed, seed the RX filter, reset the PHY and finally enable the
 * MAC and FCT TX/RX paths.  Returns 0 on success or -EIO on a reset
 * timeout.
 * NOTE(review): the do/while poll bodies (mdelay between reads) were
 * reconstructed — confirm the delay values against the original.
 */
static int lan78xx_reset(struct lan78xx_net *dev)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret = 0;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_LRST_;
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	/* wait for the lite-reset bit to self-clear */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net,
				    "timeout on completion of LiteReset");
			return -EIO;
		}
	} while (buf & HW_CFG_LRST_);

	lan78xx_init_mac_address(dev);

	/* save DEVID for later usage */
	ret = lan78xx_read_reg(dev, ID_REV, &buf);
	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;

	/* Respond to the IN token with a NAK */
	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BIR_;
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* Init LTM */
	lan78xx_init_ltm(dev);

	dev->net->hard_header_len += TX_OVERHEAD;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	/* burst cap and queue lengths depend on the negotiated USB speed */
	if (dev->udev->speed == USB_SPEED_SUPER) {
		buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	} else if (dev->udev->speed == USB_SPEED_HIGH) {
		buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
		dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
	} else {
		buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
		dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
		dev->rx_qlen = 4;
		dev->tx_qlen = 4;
	}

	ret = lan78xx_write_reg(dev, BURST_CAP, buf);
	ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);

	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
	buf |= HW_CFG_MEF_;	/* multiple ethernet frames per URB */
	ret = lan78xx_write_reg(dev, HW_CFG, buf);

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	buf |= USB_CFG_BCE_;	/* burst cap enable */
	ret = lan78xx_write_reg(dev, USB_CFG0, buf);

	/* set FIFO sizes */
	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);

	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);

	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
	ret = lan78xx_write_reg(dev, FLOW, 0);
	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);

	/* Don't need rfe_ctl_lock during initialisation */
	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

	/* Enable or disable checksum offload engines */
	lan78xx_set_features(dev->net, dev->net->features);

	lan78xx_set_multicast(dev->net);

	/* reset PHY */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_PHY_RST_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* wait for PHY reset to complete and the chip to report ready */
	timeout = jiffies + HZ;
	do {
		mdelay(1);
		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (time_after(jiffies, timeout)) {
			netdev_warn(dev->net, "timeout waiting for PHY Reset");
			return -EIO;
		}
	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));

	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
	/* LAN7801 only has RGMII mode */
	if (dev->chipid == ID_REV_CHIP_ID_7801_)
		buf &= ~MAC_CR_GMII_EN_;
	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
	ret = lan78xx_write_reg(dev, MAC_CR, buf);

	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
	buf |= FCT_TX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);

	ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);

	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
	buf |= FCT_RX_CTL_EN_;
	ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);

	return 0;
}
/* Seed the statistics rollover thresholds: 20-bit counters default to
 * 0xFFFFF; the byte-count and EEE LPI counters are 32-bit and are bumped
 * to 0xFFFFFFFF afterwards.  Finally schedule the first stats update.
 */
static void lan78xx_init_stats(struct lan78xx_net *dev)
{
	u32 *p;
	int i;

	/* initialize for stats update
	 * some counters are 20bits and some are 32bits
	 */
	p = (u32 *)&dev->stats.rollover_max;
	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
		p[i] = 0xFFFFF;

	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
/* ndo_open: reset the chip, bring up the PHY, start the interrupt URB for
 * link monitoring, seed statistics and start the TX queue.  The link is
 * assumed down until the deferred EVENT_LINK_RESET runs.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		goto out;

	ret = lan78xx_reset(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto done;

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	dev->link_on = false;

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	usb_autopm_put_interface(dev->intf);

out:
	return ret;
}
/* Unlink all outstanding TX/RX URBs and wait (uninterruptible, with a
 * short poll interval) until the rx/tx/done queues drain.  dev->wait is
 * published so completion paths can wake us.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) &&
	       !skb_queue_empty(&dev->txq) &&
	       !skb_queue_empty(&dev->done)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions\n", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);
}
/* ndo_stop: stop the stat monitor, detach the PHY (and its fixups), quiesce
 * the queues and all in-flight URBs, then neutralize deferred work before
 * releasing the autopm reference taken in ndo_open.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	phy_unregister_fixup_for_uid(PHY_KSZ9031RNX, 0xfffffff0);
	phy_unregister_fixup_for_uid(PHY_LAN8835, 0xfffffff0);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);

	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	usb_autopm_put_interface(dev->intf);

	return 0;
}
/* Flatten a possibly fragmented skb into one contiguous linear buffer.
 * The TX path relies on this so the command words plus payload can be
 * handed to USB as a single block.  Returns 0 on success, negative on
 * allocation failure (skb_linearize semantics).
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2604 static struct sk_buff
*lan78xx_tx_prep(struct lan78xx_net
*dev
,
2605 struct sk_buff
*skb
, gfp_t flags
)
2607 u32 tx_cmd_a
, tx_cmd_b
;
2609 if (skb_headroom(skb
) < TX_OVERHEAD
) {
2610 struct sk_buff
*skb2
;
2612 skb2
= skb_copy_expand(skb
, TX_OVERHEAD
, 0, flags
);
2613 dev_kfree_skb_any(skb
);
2619 if (lan78xx_linearize(skb
) < 0)
2622 tx_cmd_a
= (u32
)(skb
->len
& TX_CMD_A_LEN_MASK_
) | TX_CMD_A_FCS_
;
2624 if (skb
->ip_summed
== CHECKSUM_PARTIAL
)
2625 tx_cmd_a
|= TX_CMD_A_IPE_
| TX_CMD_A_TPE_
;
2628 if (skb_is_gso(skb
)) {
2629 u16 mss
= max(skb_shinfo(skb
)->gso_size
, TX_CMD_B_MSS_MIN_
);
2631 tx_cmd_b
= (mss
<< TX_CMD_B_MSS_SHIFT_
) & TX_CMD_B_MSS_MASK_
;
2633 tx_cmd_a
|= TX_CMD_A_LSO_
;
2636 if (skb_vlan_tag_present(skb
)) {
2637 tx_cmd_a
|= TX_CMD_A_IVTG_
;
2638 tx_cmd_b
|= skb_vlan_tag_get(skb
) & TX_CMD_B_VTAG_MASK_
;
2642 cpu_to_le32s(&tx_cmd_b
);
2643 memcpy(skb
->data
, &tx_cmd_b
, 4);
2646 cpu_to_le32s(&tx_cmd_a
);
2647 memcpy(skb
->data
, &tx_cmd_a
, 4);
2652 static enum skb_state
defer_bh(struct lan78xx_net
*dev
, struct sk_buff
*skb
,
2653 struct sk_buff_head
*list
, enum skb_state state
)
2655 unsigned long flags
;
2656 enum skb_state old_state
;
2657 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2659 spin_lock_irqsave(&list
->lock
, flags
);
2660 old_state
= entry
->state
;
2661 entry
->state
= state
;
2663 __skb_unlink(skb
, list
);
2664 spin_unlock(&list
->lock
);
2665 spin_lock(&dev
->done
.lock
);
2667 __skb_queue_tail(&dev
->done
, skb
);
2668 if (skb_queue_len(&dev
->done
) == 1)
2669 tasklet_schedule(&dev
->bh
);
2670 spin_unlock_irqrestore(&dev
->done
.lock
, flags
);
2675 static void tx_complete(struct urb
*urb
)
2677 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
2678 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
2679 struct lan78xx_net
*dev
= entry
->dev
;
2681 if (urb
->status
== 0) {
2682 dev
->net
->stats
.tx_packets
+= entry
->num_of_packet
;
2683 dev
->net
->stats
.tx_bytes
+= entry
->length
;
2685 dev
->net
->stats
.tx_errors
++;
2687 switch (urb
->status
) {
2689 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
2692 /* software-driven interface shutdown */
2700 netif_stop_queue(dev
->net
);
2703 netif_dbg(dev
, tx_err
, dev
->net
,
2704 "tx err %d\n", entry
->urb
->status
);
2709 usb_autopm_put_interface_async(dev
->intf
);
2711 defer_bh(dev
, skb
, &dev
->txq
, tx_done
);
2714 static void lan78xx_queue_skb(struct sk_buff_head
*list
,
2715 struct sk_buff
*newsk
, enum skb_state state
)
2717 struct skb_data
*entry
= (struct skb_data
*)newsk
->cb
;
2719 __skb_queue_tail(list
, newsk
);
2720 entry
->state
= state
;
2724 lan78xx_start_xmit(struct sk_buff
*skb
, struct net_device
*net
)
2726 struct lan78xx_net
*dev
= netdev_priv(net
);
2727 struct sk_buff
*skb2
= NULL
;
2730 skb_tx_timestamp(skb
);
2731 skb2
= lan78xx_tx_prep(dev
, skb
, GFP_ATOMIC
);
2735 skb_queue_tail(&dev
->txq_pend
, skb2
);
2737 /* throttle TX patch at slower than SUPER SPEED USB */
2738 if ((dev
->udev
->speed
< USB_SPEED_SUPER
) &&
2739 (skb_queue_len(&dev
->txq_pend
) > 10))
2740 netif_stop_queue(net
);
2742 netif_dbg(dev
, tx_err
, dev
->net
,
2743 "lan78xx_tx_prep return NULL\n");
2744 dev
->net
->stats
.tx_errors
++;
2745 dev
->net
->stats
.tx_dropped
++;
2748 tasklet_schedule(&dev
->bh
);
2750 return NETDEV_TX_OK
;
2754 lan78xx_get_endpoints(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2757 struct usb_host_interface
*alt
= NULL
;
2758 struct usb_host_endpoint
*in
= NULL
, *out
= NULL
;
2759 struct usb_host_endpoint
*status
= NULL
;
2761 for (tmp
= 0; tmp
< intf
->num_altsetting
; tmp
++) {
2767 alt
= intf
->altsetting
+ tmp
;
2769 for (ep
= 0; ep
< alt
->desc
.bNumEndpoints
; ep
++) {
2770 struct usb_host_endpoint
*e
;
2773 e
= alt
->endpoint
+ ep
;
2774 switch (e
->desc
.bmAttributes
) {
2775 case USB_ENDPOINT_XFER_INT
:
2776 if (!usb_endpoint_dir_in(&e
->desc
))
2780 case USB_ENDPOINT_XFER_BULK
:
2785 if (usb_endpoint_dir_in(&e
->desc
)) {
2788 else if (intr
&& !status
)
2798 if (!alt
|| !in
|| !out
)
2801 dev
->pipe_in
= usb_rcvbulkpipe(dev
->udev
,
2802 in
->desc
.bEndpointAddress
&
2803 USB_ENDPOINT_NUMBER_MASK
);
2804 dev
->pipe_out
= usb_sndbulkpipe(dev
->udev
,
2805 out
->desc
.bEndpointAddress
&
2806 USB_ENDPOINT_NUMBER_MASK
);
2807 dev
->ep_intr
= status
;
2812 static int lan78xx_bind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2814 struct lan78xx_priv
*pdata
= NULL
;
2818 ret
= lan78xx_get_endpoints(dev
, intf
);
2820 dev
->data
[0] = (unsigned long)kzalloc(sizeof(*pdata
), GFP_KERNEL
);
2822 pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2824 netdev_warn(dev
->net
, "Unable to allocate lan78xx_priv");
2830 spin_lock_init(&pdata
->rfe_ctl_lock
);
2831 mutex_init(&pdata
->dataport_mutex
);
2833 INIT_WORK(&pdata
->set_multicast
, lan78xx_deferred_multicast_write
);
2835 for (i
= 0; i
< DP_SEL_VHF_VLAN_LEN
; i
++)
2836 pdata
->vlan_table
[i
] = 0;
2838 INIT_WORK(&pdata
->set_vlan
, lan78xx_deferred_vlan_write
);
2840 dev
->net
->features
= 0;
2842 if (DEFAULT_TX_CSUM_ENABLE
)
2843 dev
->net
->features
|= NETIF_F_HW_CSUM
;
2845 if (DEFAULT_RX_CSUM_ENABLE
)
2846 dev
->net
->features
|= NETIF_F_RXCSUM
;
2848 if (DEFAULT_TSO_CSUM_ENABLE
)
2849 dev
->net
->features
|= NETIF_F_TSO
| NETIF_F_TSO6
| NETIF_F_SG
;
2851 dev
->net
->hw_features
= dev
->net
->features
;
2853 ret
= lan78xx_setup_irq_domain(dev
);
2855 netdev_warn(dev
->net
,
2856 "lan78xx_setup_irq_domain() failed : %d", ret
);
2861 /* Init all registers */
2862 ret
= lan78xx_reset(dev
);
2864 lan78xx_mdio_init(dev
);
2866 dev
->net
->flags
|= IFF_MULTICAST
;
2868 pdata
->wol
= WAKE_MAGIC
;
2873 static void lan78xx_unbind(struct lan78xx_net
*dev
, struct usb_interface
*intf
)
2875 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
2877 lan78xx_remove_irq_domain(dev
);
2879 lan78xx_remove_mdio(dev
);
2882 netif_dbg(dev
, ifdown
, dev
->net
, "free pdata");
2889 static void lan78xx_rx_csum_offload(struct lan78xx_net
*dev
,
2890 struct sk_buff
*skb
,
2891 u32 rx_cmd_a
, u32 rx_cmd_b
)
2893 if (!(dev
->net
->features
& NETIF_F_RXCSUM
) ||
2894 unlikely(rx_cmd_a
& RX_CMD_A_ICSM_
)) {
2895 skb
->ip_summed
= CHECKSUM_NONE
;
2897 skb
->csum
= ntohs((u16
)(rx_cmd_b
>> RX_CMD_B_CSUM_SHIFT_
));
2898 skb
->ip_summed
= CHECKSUM_COMPLETE
;
2902 static void lan78xx_skb_return(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2906 if (test_bit(EVENT_RX_PAUSED
, &dev
->flags
)) {
2907 skb_queue_tail(&dev
->rxq_pause
, skb
);
2911 dev
->net
->stats
.rx_packets
++;
2912 dev
->net
->stats
.rx_bytes
+= skb
->len
;
2914 skb
->protocol
= eth_type_trans(skb
, dev
->net
);
2916 netif_dbg(dev
, rx_status
, dev
->net
, "< rx, len %zu, type 0x%x\n",
2917 skb
->len
+ sizeof(struct ethhdr
), skb
->protocol
);
2918 memset(skb
->cb
, 0, sizeof(struct skb_data
));
2920 if (skb_defer_rx_timestamp(skb
))
2923 status
= netif_rx(skb
);
2924 if (status
!= NET_RX_SUCCESS
)
2925 netif_dbg(dev
, rx_err
, dev
->net
,
2926 "netif_rx status %d\n", status
);
2929 static int lan78xx_rx(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
2931 if (skb
->len
< dev
->net
->hard_header_len
)
2934 while (skb
->len
> 0) {
2935 u32 rx_cmd_a
, rx_cmd_b
, align_count
, size
;
2937 struct sk_buff
*skb2
;
2938 unsigned char *packet
;
2940 memcpy(&rx_cmd_a
, skb
->data
, sizeof(rx_cmd_a
));
2941 le32_to_cpus(&rx_cmd_a
);
2942 skb_pull(skb
, sizeof(rx_cmd_a
));
2944 memcpy(&rx_cmd_b
, skb
->data
, sizeof(rx_cmd_b
));
2945 le32_to_cpus(&rx_cmd_b
);
2946 skb_pull(skb
, sizeof(rx_cmd_b
));
2948 memcpy(&rx_cmd_c
, skb
->data
, sizeof(rx_cmd_c
));
2949 le16_to_cpus(&rx_cmd_c
);
2950 skb_pull(skb
, sizeof(rx_cmd_c
));
2954 /* get the packet length */
2955 size
= (rx_cmd_a
& RX_CMD_A_LEN_MASK_
);
2956 align_count
= (4 - ((size
+ RXW_PADDING
) % 4)) % 4;
2958 if (unlikely(rx_cmd_a
& RX_CMD_A_RED_
)) {
2959 netif_dbg(dev
, rx_err
, dev
->net
,
2960 "Error rx_cmd_a=0x%08x", rx_cmd_a
);
2962 /* last frame in this batch */
2963 if (skb
->len
== size
) {
2964 lan78xx_rx_csum_offload(dev
, skb
,
2965 rx_cmd_a
, rx_cmd_b
);
2967 skb_trim(skb
, skb
->len
- 4); /* remove fcs */
2968 skb
->truesize
= size
+ sizeof(struct sk_buff
);
2973 skb2
= skb_clone(skb
, GFP_ATOMIC
);
2974 if (unlikely(!skb2
)) {
2975 netdev_warn(dev
->net
, "Error allocating skb");
2980 skb2
->data
= packet
;
2981 skb_set_tail_pointer(skb2
, size
);
2983 lan78xx_rx_csum_offload(dev
, skb2
, rx_cmd_a
, rx_cmd_b
);
2985 skb_trim(skb2
, skb2
->len
- 4); /* remove fcs */
2986 skb2
->truesize
= size
+ sizeof(struct sk_buff
);
2988 lan78xx_skb_return(dev
, skb2
);
2991 skb_pull(skb
, size
);
2993 /* padding bytes before the next frame starts */
2995 skb_pull(skb
, align_count
);
3001 static inline void rx_process(struct lan78xx_net
*dev
, struct sk_buff
*skb
)
3003 if (!lan78xx_rx(dev
, skb
)) {
3004 dev
->net
->stats
.rx_errors
++;
3009 lan78xx_skb_return(dev
, skb
);
3013 netif_dbg(dev
, rx_err
, dev
->net
, "drop\n");
3014 dev
->net
->stats
.rx_errors
++;
3016 skb_queue_tail(&dev
->done
, skb
);
3019 static void rx_complete(struct urb
*urb
);
3021 static int rx_submit(struct lan78xx_net
*dev
, struct urb
*urb
, gfp_t flags
)
3023 struct sk_buff
*skb
;
3024 struct skb_data
*entry
;
3025 unsigned long lockflags
;
3026 size_t size
= dev
->rx_urb_size
;
3029 skb
= netdev_alloc_skb_ip_align(dev
->net
, size
);
3035 entry
= (struct skb_data
*)skb
->cb
;
3040 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_in
,
3041 skb
->data
, size
, rx_complete
, skb
);
3043 spin_lock_irqsave(&dev
->rxq
.lock
, lockflags
);
3045 if (netif_device_present(dev
->net
) &&
3046 netif_running(dev
->net
) &&
3047 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
3048 !test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3049 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
3052 lan78xx_queue_skb(&dev
->rxq
, skb
, rx_start
);
3055 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
3058 netif_dbg(dev
, ifdown
, dev
->net
, "device gone\n");
3059 netif_device_detach(dev
->net
);
3065 netif_dbg(dev
, rx_err
, dev
->net
,
3066 "rx submit, %d\n", ret
);
3067 tasklet_schedule(&dev
->bh
);
3070 netif_dbg(dev
, ifdown
, dev
->net
, "rx: stopped\n");
3073 spin_unlock_irqrestore(&dev
->rxq
.lock
, lockflags
);
3075 dev_kfree_skb_any(skb
);
3081 static void rx_complete(struct urb
*urb
)
3083 struct sk_buff
*skb
= (struct sk_buff
*)urb
->context
;
3084 struct skb_data
*entry
= (struct skb_data
*)skb
->cb
;
3085 struct lan78xx_net
*dev
= entry
->dev
;
3086 int urb_status
= urb
->status
;
3087 enum skb_state state
;
3089 skb_put(skb
, urb
->actual_length
);
3093 switch (urb_status
) {
3095 if (skb
->len
< dev
->net
->hard_header_len
) {
3097 dev
->net
->stats
.rx_errors
++;
3098 dev
->net
->stats
.rx_length_errors
++;
3099 netif_dbg(dev
, rx_err
, dev
->net
,
3100 "rx length %d\n", skb
->len
);
3102 usb_mark_last_busy(dev
->udev
);
3105 dev
->net
->stats
.rx_errors
++;
3106 lan78xx_defer_kevent(dev
, EVENT_RX_HALT
);
3108 case -ECONNRESET
: /* async unlink */
3109 case -ESHUTDOWN
: /* hardware gone */
3110 netif_dbg(dev
, ifdown
, dev
->net
,
3111 "rx shutdown, code %d\n", urb_status
);
3119 dev
->net
->stats
.rx_errors
++;
3125 /* data overrun ... flush fifo? */
3127 dev
->net
->stats
.rx_over_errors
++;
3132 dev
->net
->stats
.rx_errors
++;
3133 netif_dbg(dev
, rx_err
, dev
->net
, "rx status %d\n", urb_status
);
3137 state
= defer_bh(dev
, skb
, &dev
->rxq
, state
);
3140 if (netif_running(dev
->net
) &&
3141 !test_bit(EVENT_RX_HALT
, &dev
->flags
) &&
3142 state
!= unlink_start
) {
3143 rx_submit(dev
, urb
, GFP_ATOMIC
);
3148 netif_dbg(dev
, rx_err
, dev
->net
, "no read resubmitted\n");
3151 static void lan78xx_tx_bh(struct lan78xx_net
*dev
)
3154 struct urb
*urb
= NULL
;
3155 struct skb_data
*entry
;
3156 unsigned long flags
;
3157 struct sk_buff_head
*tqp
= &dev
->txq_pend
;
3158 struct sk_buff
*skb
, *skb2
;
3161 int skb_totallen
, pkt_cnt
;
3167 for (skb
= tqp
->next
; pkt_cnt
< tqp
->qlen
; skb
= skb
->next
) {
3168 if (skb_is_gso(skb
)) {
3170 /* handle previous packets first */
3174 length
= skb
->len
- TX_OVERHEAD
;
3175 skb2
= skb_dequeue(tqp
);
3179 if ((skb_totallen
+ skb
->len
) > MAX_SINGLE_PACKET_SIZE
)
3181 skb_totallen
= skb
->len
+ roundup(skb_totallen
, sizeof(u32
));
3185 /* copy to a single skb */
3186 skb
= alloc_skb(skb_totallen
, GFP_ATOMIC
);
3190 skb_put(skb
, skb_totallen
);
3192 for (count
= pos
= 0; count
< pkt_cnt
; count
++) {
3193 skb2
= skb_dequeue(tqp
);
3195 length
+= (skb2
->len
- TX_OVERHEAD
);
3196 memcpy(skb
->data
+ pos
, skb2
->data
, skb2
->len
);
3197 pos
+= roundup(skb2
->len
, sizeof(u32
));
3198 dev_kfree_skb(skb2
);
3203 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
3207 entry
= (struct skb_data
*)skb
->cb
;
3210 entry
->length
= length
;
3211 entry
->num_of_packet
= count
;
3213 spin_lock_irqsave(&dev
->txq
.lock
, flags
);
3214 ret
= usb_autopm_get_interface_async(dev
->intf
);
3216 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
3220 usb_fill_bulk_urb(urb
, dev
->udev
, dev
->pipe_out
,
3221 skb
->data
, skb
->len
, tx_complete
, skb
);
3223 if (length
% dev
->maxpacket
== 0) {
3224 /* send USB_ZERO_PACKET */
3225 urb
->transfer_flags
|= URB_ZERO_PACKET
;
3229 /* if this triggers the device is still a sleep */
3230 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3231 /* transmission will be done in resume */
3232 usb_anchor_urb(urb
, &dev
->deferred
);
3233 /* no use to process more packets */
3234 netif_stop_queue(dev
->net
);
3236 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
3237 netdev_dbg(dev
->net
, "Delaying transmission for resumption\n");
3242 ret
= usb_submit_urb(urb
, GFP_ATOMIC
);
3245 netif_trans_update(dev
->net
);
3246 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
3247 if (skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
)
3248 netif_stop_queue(dev
->net
);
3251 netif_stop_queue(dev
->net
);
3252 lan78xx_defer_kevent(dev
, EVENT_TX_HALT
);
3253 usb_autopm_put_interface_async(dev
->intf
);
3256 usb_autopm_put_interface_async(dev
->intf
);
3257 netif_dbg(dev
, tx_err
, dev
->net
,
3258 "tx: submit urb err %d\n", ret
);
3262 spin_unlock_irqrestore(&dev
->txq
.lock
, flags
);
3265 netif_dbg(dev
, tx_err
, dev
->net
, "drop, code %d\n", ret
);
3267 dev
->net
->stats
.tx_dropped
++;
3269 dev_kfree_skb_any(skb
);
3272 netif_dbg(dev
, tx_queued
, dev
->net
,
3273 "> tx, len %d, type 0x%x\n", length
, skb
->protocol
);
3276 static void lan78xx_rx_bh(struct lan78xx_net
*dev
)
3281 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
) {
3282 for (i
= 0; i
< 10; i
++) {
3283 if (skb_queue_len(&dev
->rxq
) >= dev
->rx_qlen
)
3285 urb
= usb_alloc_urb(0, GFP_ATOMIC
);
3287 if (rx_submit(dev
, urb
, GFP_ATOMIC
) == -ENOLINK
)
3291 if (skb_queue_len(&dev
->rxq
) < dev
->rx_qlen
)
3292 tasklet_schedule(&dev
->bh
);
3294 if (skb_queue_len(&dev
->txq
) < dev
->tx_qlen
)
3295 netif_wake_queue(dev
->net
);
3298 static void lan78xx_bh(unsigned long param
)
3300 struct lan78xx_net
*dev
= (struct lan78xx_net
*)param
;
3301 struct sk_buff
*skb
;
3302 struct skb_data
*entry
;
3304 while ((skb
= skb_dequeue(&dev
->done
))) {
3305 entry
= (struct skb_data
*)(skb
->cb
);
3306 switch (entry
->state
) {
3308 entry
->state
= rx_cleanup
;
3309 rx_process(dev
, skb
);
3312 usb_free_urb(entry
->urb
);
3316 usb_free_urb(entry
->urb
);
3320 netdev_dbg(dev
->net
, "skb state %d\n", entry
->state
);
3325 if (netif_device_present(dev
->net
) && netif_running(dev
->net
)) {
3326 /* reset update timer delta */
3327 if (timer_pending(&dev
->stat_monitor
) && (dev
->delta
!= 1)) {
3329 mod_timer(&dev
->stat_monitor
,
3330 jiffies
+ STAT_UPDATE_TIMER
);
3333 if (!skb_queue_empty(&dev
->txq_pend
))
3336 if (!timer_pending(&dev
->delay
) &&
3337 !test_bit(EVENT_RX_HALT
, &dev
->flags
))
3342 static void lan78xx_delayedwork(struct work_struct
*work
)
3345 struct lan78xx_net
*dev
;
3347 dev
= container_of(work
, struct lan78xx_net
, wq
.work
);
3349 if (test_bit(EVENT_TX_HALT
, &dev
->flags
)) {
3350 unlink_urbs(dev
, &dev
->txq
);
3351 status
= usb_autopm_get_interface(dev
->intf
);
3354 status
= usb_clear_halt(dev
->udev
, dev
->pipe_out
);
3355 usb_autopm_put_interface(dev
->intf
);
3358 status
!= -ESHUTDOWN
) {
3359 if (netif_msg_tx_err(dev
))
3361 netdev_err(dev
->net
,
3362 "can't clear tx halt, status %d\n",
3365 clear_bit(EVENT_TX_HALT
, &dev
->flags
);
3366 if (status
!= -ESHUTDOWN
)
3367 netif_wake_queue(dev
->net
);
3370 if (test_bit(EVENT_RX_HALT
, &dev
->flags
)) {
3371 unlink_urbs(dev
, &dev
->rxq
);
3372 status
= usb_autopm_get_interface(dev
->intf
);
3375 status
= usb_clear_halt(dev
->udev
, dev
->pipe_in
);
3376 usb_autopm_put_interface(dev
->intf
);
3379 status
!= -ESHUTDOWN
) {
3380 if (netif_msg_rx_err(dev
))
3382 netdev_err(dev
->net
,
3383 "can't clear rx halt, status %d\n",
3386 clear_bit(EVENT_RX_HALT
, &dev
->flags
);
3387 tasklet_schedule(&dev
->bh
);
3391 if (test_bit(EVENT_LINK_RESET
, &dev
->flags
)) {
3394 clear_bit(EVENT_LINK_RESET
, &dev
->flags
);
3395 status
= usb_autopm_get_interface(dev
->intf
);
3398 if (lan78xx_link_reset(dev
) < 0) {
3399 usb_autopm_put_interface(dev
->intf
);
3401 netdev_info(dev
->net
, "link reset failed (%d)\n",
3404 usb_autopm_put_interface(dev
->intf
);
3408 if (test_bit(EVENT_STAT_UPDATE
, &dev
->flags
)) {
3409 lan78xx_update_stats(dev
);
3411 clear_bit(EVENT_STAT_UPDATE
, &dev
->flags
);
3413 mod_timer(&dev
->stat_monitor
,
3414 jiffies
+ (STAT_UPDATE_TIMER
* dev
->delta
));
3416 dev
->delta
= min((dev
->delta
* 2), 50);
3420 static void intr_complete(struct urb
*urb
)
3422 struct lan78xx_net
*dev
= urb
->context
;
3423 int status
= urb
->status
;
3428 lan78xx_status(dev
, urb
);
3431 /* software-driven interface shutdown */
3432 case -ENOENT
: /* urb killed */
3433 case -ESHUTDOWN
: /* hardware gone */
3434 netif_dbg(dev
, ifdown
, dev
->net
,
3435 "intr shutdown, code %d\n", status
);
3438 /* NOTE: not throttling like RX/TX, since this endpoint
3439 * already polls infrequently
3442 netdev_dbg(dev
->net
, "intr status %d\n", status
);
3446 if (!netif_running(dev
->net
))
3449 memset(urb
->transfer_buffer
, 0, urb
->transfer_buffer_length
);
3450 status
= usb_submit_urb(urb
, GFP_ATOMIC
);
3452 netif_err(dev
, timer
, dev
->net
,
3453 "intr resubmit --> %d\n", status
);
3456 static void lan78xx_disconnect(struct usb_interface
*intf
)
3458 struct lan78xx_net
*dev
;
3459 struct usb_device
*udev
;
3460 struct net_device
*net
;
3462 dev
= usb_get_intfdata(intf
);
3463 usb_set_intfdata(intf
, NULL
);
3467 udev
= interface_to_usbdev(intf
);
3470 unregister_netdev(net
);
3472 cancel_delayed_work_sync(&dev
->wq
);
3474 usb_scuttle_anchored_urbs(&dev
->deferred
);
3476 lan78xx_unbind(dev
, intf
);
3478 usb_kill_urb(dev
->urb_intr
);
3479 usb_free_urb(dev
->urb_intr
);
3485 static void lan78xx_tx_timeout(struct net_device
*net
)
3487 struct lan78xx_net
*dev
= netdev_priv(net
);
3489 unlink_urbs(dev
, &dev
->txq
);
3490 tasklet_schedule(&dev
->bh
);
3493 static const struct net_device_ops lan78xx_netdev_ops
= {
3494 .ndo_open
= lan78xx_open
,
3495 .ndo_stop
= lan78xx_stop
,
3496 .ndo_start_xmit
= lan78xx_start_xmit
,
3497 .ndo_tx_timeout
= lan78xx_tx_timeout
,
3498 .ndo_change_mtu
= lan78xx_change_mtu
,
3499 .ndo_set_mac_address
= lan78xx_set_mac_addr
,
3500 .ndo_validate_addr
= eth_validate_addr
,
3501 .ndo_do_ioctl
= lan78xx_ioctl
,
3502 .ndo_set_rx_mode
= lan78xx_set_multicast
,
3503 .ndo_set_features
= lan78xx_set_features
,
3504 .ndo_vlan_rx_add_vid
= lan78xx_vlan_rx_add_vid
,
3505 .ndo_vlan_rx_kill_vid
= lan78xx_vlan_rx_kill_vid
,
3508 static void lan78xx_stat_monitor(unsigned long param
)
3510 struct lan78xx_net
*dev
;
3512 dev
= (struct lan78xx_net
*)param
;
3514 lan78xx_defer_kevent(dev
, EVENT_STAT_UPDATE
);
3517 static int lan78xx_probe(struct usb_interface
*intf
,
3518 const struct usb_device_id
*id
)
3520 struct lan78xx_net
*dev
;
3521 struct net_device
*netdev
;
3522 struct usb_device
*udev
;
3528 udev
= interface_to_usbdev(intf
);
3529 udev
= usb_get_dev(udev
);
3532 netdev
= alloc_etherdev(sizeof(struct lan78xx_net
));
3534 dev_err(&intf
->dev
, "Error: OOM\n");
3538 /* netdev_printk() needs this */
3539 SET_NETDEV_DEV(netdev
, &intf
->dev
);
3541 dev
= netdev_priv(netdev
);
3545 dev
->msg_enable
= netif_msg_init(msg_level
, NETIF_MSG_DRV
3546 | NETIF_MSG_PROBE
| NETIF_MSG_LINK
);
3548 skb_queue_head_init(&dev
->rxq
);
3549 skb_queue_head_init(&dev
->txq
);
3550 skb_queue_head_init(&dev
->done
);
3551 skb_queue_head_init(&dev
->rxq_pause
);
3552 skb_queue_head_init(&dev
->txq_pend
);
3553 mutex_init(&dev
->phy_mutex
);
3555 tasklet_init(&dev
->bh
, lan78xx_bh
, (unsigned long)dev
);
3556 INIT_DELAYED_WORK(&dev
->wq
, lan78xx_delayedwork
);
3557 init_usb_anchor(&dev
->deferred
);
3559 netdev
->netdev_ops
= &lan78xx_netdev_ops
;
3560 netdev
->watchdog_timeo
= TX_TIMEOUT_JIFFIES
;
3561 netdev
->ethtool_ops
= &lan78xx_ethtool_ops
;
3563 dev
->stat_monitor
.function
= lan78xx_stat_monitor
;
3564 dev
->stat_monitor
.data
= (unsigned long)dev
;
3566 init_timer(&dev
->stat_monitor
);
3568 mutex_init(&dev
->stats
.access_lock
);
3570 ret
= lan78xx_bind(dev
, intf
);
3573 strcpy(netdev
->name
, "eth%d");
3575 if (netdev
->mtu
> (dev
->hard_mtu
- netdev
->hard_header_len
))
3576 netdev
->mtu
= dev
->hard_mtu
- netdev
->hard_header_len
;
3578 /* MTU range: 68 - 9000 */
3579 netdev
->max_mtu
= MAX_SINGLE_PACKET_SIZE
;
3581 dev
->ep_blkin
= (intf
->cur_altsetting
)->endpoint
+ 0;
3582 dev
->ep_blkout
= (intf
->cur_altsetting
)->endpoint
+ 1;
3583 dev
->ep_intr
= (intf
->cur_altsetting
)->endpoint
+ 2;
3585 dev
->pipe_in
= usb_rcvbulkpipe(udev
, BULK_IN_PIPE
);
3586 dev
->pipe_out
= usb_sndbulkpipe(udev
, BULK_OUT_PIPE
);
3588 dev
->pipe_intr
= usb_rcvintpipe(dev
->udev
,
3589 dev
->ep_intr
->desc
.bEndpointAddress
&
3590 USB_ENDPOINT_NUMBER_MASK
);
3591 period
= dev
->ep_intr
->desc
.bInterval
;
3593 maxp
= usb_maxpacket(dev
->udev
, dev
->pipe_intr
, 0);
3594 buf
= kmalloc(maxp
, GFP_KERNEL
);
3596 dev
->urb_intr
= usb_alloc_urb(0, GFP_KERNEL
);
3597 if (!dev
->urb_intr
) {
3602 usb_fill_int_urb(dev
->urb_intr
, dev
->udev
,
3603 dev
->pipe_intr
, buf
, maxp
,
3604 intr_complete
, dev
, period
);
3608 dev
->maxpacket
= usb_maxpacket(dev
->udev
, dev
->pipe_out
, 1);
3610 /* driver requires remote-wakeup capability during autosuspend. */
3611 intf
->needs_remote_wakeup
= 1;
3613 ret
= register_netdev(netdev
);
3615 netif_err(dev
, probe
, netdev
, "couldn't register the device\n");
3619 usb_set_intfdata(intf
, dev
);
3621 ret
= device_set_wakeup_enable(&udev
->dev
, true);
3623 /* Default delay of 2sec has more overhead than advantage.
3624 * Set to 10sec as default.
3626 pm_runtime_set_autosuspend_delay(&udev
->dev
,
3627 DEFAULT_AUTOSUSPEND_DELAY
);
3632 lan78xx_unbind(dev
, intf
);
3634 free_netdev(netdev
);
3641 static u16
lan78xx_wakeframe_crc16(const u8
*buf
, int len
)
3643 const u16 crc16poly
= 0x8005;
3649 for (i
= 0; i
< len
; i
++) {
3651 for (bit
= 0; bit
< 8; bit
++) {
3655 if (msb
^ (u16
)(data
& 1)) {
3657 crc
|= (u16
)0x0001U
;
3666 static int lan78xx_set_suspend(struct lan78xx_net
*dev
, u32 wol
)
3674 const u8 ipv4_multicast
[3] = { 0x01, 0x00, 0x5E };
3675 const u8 ipv6_multicast
[3] = { 0x33, 0x33 };
3676 const u8 arp_type
[2] = { 0x08, 0x06 };
3678 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3679 buf
&= ~MAC_TX_TXEN_
;
3680 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3681 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3682 buf
&= ~MAC_RX_RXEN_
;
3683 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3685 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3686 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3687 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3692 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &temp_pmt_ctl
);
3693 temp_pmt_ctl
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3694 temp_pmt_ctl
|= PMT_CTL_RES_CLR_WKP_STS_
;
3696 for (mask_index
= 0; mask_index
< NUM_OF_WUF_CFG
; mask_index
++)
3697 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
), 0);
3700 if (wol
& WAKE_PHY
) {
3701 temp_pmt_ctl
|= PMT_CTL_PHY_WAKE_EN_
;
3703 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3704 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3705 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3707 if (wol
& WAKE_MAGIC
) {
3708 temp_wucsr
|= WUCSR_MPEN_
;
3710 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3711 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3712 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_3_
;
3714 if (wol
& WAKE_BCAST
) {
3715 temp_wucsr
|= WUCSR_BCST_EN_
;
3717 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3718 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3719 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3721 if (wol
& WAKE_MCAST
) {
3722 temp_wucsr
|= WUCSR_WAKE_EN_
;
3724 /* set WUF_CFG & WUF_MASK for IPv4 Multicast */
3725 crc
= lan78xx_wakeframe_crc16(ipv4_multicast
, 3);
3726 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3728 WUF_CFGX_TYPE_MCAST_
|
3729 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3730 (crc
& WUF_CFGX_CRC16_MASK_
));
3732 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 7);
3733 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3734 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3735 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3738 /* for IPv6 Multicast */
3739 crc
= lan78xx_wakeframe_crc16(ipv6_multicast
, 2);
3740 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3742 WUF_CFGX_TYPE_MCAST_
|
3743 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3744 (crc
& WUF_CFGX_CRC16_MASK_
));
3746 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 3);
3747 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3748 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3749 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3752 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3753 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3754 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3756 if (wol
& WAKE_UCAST
) {
3757 temp_wucsr
|= WUCSR_PFDA_EN_
;
3759 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3760 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3761 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3763 if (wol
& WAKE_ARP
) {
3764 temp_wucsr
|= WUCSR_WAKE_EN_
;
3766 /* set WUF_CFG & WUF_MASK
3767 * for packettype (offset 12,13) = ARP (0x0806)
3769 crc
= lan78xx_wakeframe_crc16(arp_type
, 2);
3770 ret
= lan78xx_write_reg(dev
, WUF_CFG(mask_index
),
3772 WUF_CFGX_TYPE_ALL_
|
3773 (0 << WUF_CFGX_OFFSET_SHIFT_
) |
3774 (crc
& WUF_CFGX_CRC16_MASK_
));
3776 ret
= lan78xx_write_reg(dev
, WUF_MASK0(mask_index
), 0x3000);
3777 ret
= lan78xx_write_reg(dev
, WUF_MASK1(mask_index
), 0);
3778 ret
= lan78xx_write_reg(dev
, WUF_MASK2(mask_index
), 0);
3779 ret
= lan78xx_write_reg(dev
, WUF_MASK3(mask_index
), 0);
3782 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3783 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3784 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3787 ret
= lan78xx_write_reg(dev
, WUCSR
, temp_wucsr
);
3789 /* when multiple WOL bits are set */
3790 if (hweight_long((unsigned long)wol
) > 1) {
3791 temp_pmt_ctl
|= PMT_CTL_WOL_EN_
;
3792 temp_pmt_ctl
&= ~PMT_CTL_SUS_MODE_MASK_
;
3793 temp_pmt_ctl
|= PMT_CTL_SUS_MODE_0_
;
3795 ret
= lan78xx_write_reg(dev
, PMT_CTL
, temp_pmt_ctl
);
3798 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3799 buf
|= PMT_CTL_WUPS_MASK_
;
3800 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3802 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3803 buf
|= MAC_RX_RXEN_
;
3804 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3809 static int lan78xx_suspend(struct usb_interface
*intf
, pm_message_t message
)
3811 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3812 struct lan78xx_priv
*pdata
= (struct lan78xx_priv
*)(dev
->data
[0]);
3817 event
= message
.event
;
3819 if (!dev
->suspend_count
++) {
3820 spin_lock_irq(&dev
->txq
.lock
);
3821 /* don't autosuspend while transmitting */
3822 if ((skb_queue_len(&dev
->txq
) ||
3823 skb_queue_len(&dev
->txq_pend
)) &&
3824 PMSG_IS_AUTO(message
)) {
3825 spin_unlock_irq(&dev
->txq
.lock
);
3829 set_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3830 spin_unlock_irq(&dev
->txq
.lock
);
3834 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3835 buf
&= ~MAC_TX_TXEN_
;
3836 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3837 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3838 buf
&= ~MAC_RX_RXEN_
;
3839 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3841 /* empty out the rx and queues */
3842 netif_device_detach(dev
->net
);
3843 lan78xx_terminate_urbs(dev
);
3844 usb_kill_urb(dev
->urb_intr
);
3847 netif_device_attach(dev
->net
);
3850 if (test_bit(EVENT_DEV_ASLEEP
, &dev
->flags
)) {
3851 del_timer(&dev
->stat_monitor
);
3853 if (PMSG_IS_AUTO(message
)) {
3854 /* auto suspend (selective suspend) */
3855 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3856 buf
&= ~MAC_TX_TXEN_
;
3857 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
3858 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3859 buf
&= ~MAC_RX_RXEN_
;
3860 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3862 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3863 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3864 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3866 /* set goodframe wakeup */
3867 ret
= lan78xx_read_reg(dev
, WUCSR
, &buf
);
3869 buf
|= WUCSR_RFE_WAKE_EN_
;
3870 buf
|= WUCSR_STORE_WAKE_
;
3872 ret
= lan78xx_write_reg(dev
, WUCSR
, buf
);
3874 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3876 buf
&= ~PMT_CTL_RES_CLR_WKP_EN_
;
3877 buf
|= PMT_CTL_RES_CLR_WKP_STS_
;
3879 buf
|= PMT_CTL_PHY_WAKE_EN_
;
3880 buf
|= PMT_CTL_WOL_EN_
;
3881 buf
&= ~PMT_CTL_SUS_MODE_MASK_
;
3882 buf
|= PMT_CTL_SUS_MODE_3_
;
3884 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3886 ret
= lan78xx_read_reg(dev
, PMT_CTL
, &buf
);
3888 buf
|= PMT_CTL_WUPS_MASK_
;
3890 ret
= lan78xx_write_reg(dev
, PMT_CTL
, buf
);
3892 ret
= lan78xx_read_reg(dev
, MAC_RX
, &buf
);
3893 buf
|= MAC_RX_RXEN_
;
3894 ret
= lan78xx_write_reg(dev
, MAC_RX
, buf
);
3896 lan78xx_set_suspend(dev
, pdata
->wol
);
3905 static int lan78xx_resume(struct usb_interface
*intf
)
3907 struct lan78xx_net
*dev
= usb_get_intfdata(intf
);
3908 struct sk_buff
*skb
;
3913 if (!timer_pending(&dev
->stat_monitor
)) {
3915 mod_timer(&dev
->stat_monitor
,
3916 jiffies
+ STAT_UPDATE_TIMER
);
3919 if (!--dev
->suspend_count
) {
3920 /* resume interrupt URBs */
3921 if (dev
->urb_intr
&& test_bit(EVENT_DEV_OPEN
, &dev
->flags
))
3922 usb_submit_urb(dev
->urb_intr
, GFP_NOIO
);
3924 spin_lock_irq(&dev
->txq
.lock
);
3925 while ((res
= usb_get_from_anchor(&dev
->deferred
))) {
3926 skb
= (struct sk_buff
*)res
->context
;
3927 ret
= usb_submit_urb(res
, GFP_ATOMIC
);
3929 dev_kfree_skb_any(skb
);
3931 usb_autopm_put_interface_async(dev
->intf
);
3933 netif_trans_update(dev
->net
);
3934 lan78xx_queue_skb(&dev
->txq
, skb
, tx_start
);
3938 clear_bit(EVENT_DEV_ASLEEP
, &dev
->flags
);
3939 spin_unlock_irq(&dev
->txq
.lock
);
3941 if (test_bit(EVENT_DEV_OPEN
, &dev
->flags
)) {
3942 if (!(skb_queue_len(&dev
->txq
) >= dev
->tx_qlen
))
3943 netif_start_queue(dev
->net
);
3944 tasklet_schedule(&dev
->bh
);
3948 ret
= lan78xx_write_reg(dev
, WUCSR2
, 0);
3949 ret
= lan78xx_write_reg(dev
, WUCSR
, 0);
3950 ret
= lan78xx_write_reg(dev
, WK_SRC
, 0xFFF1FF1FUL
);
3952 ret
= lan78xx_write_reg(dev
, WUCSR2
, WUCSR2_NS_RCD_
|
3954 WUCSR2_IPV6_TCPSYN_RCD_
|
3955 WUCSR2_IPV4_TCPSYN_RCD_
);
3957 ret
= lan78xx_write_reg(dev
, WUCSR
, WUCSR_EEE_TX_WAKE_
|
3958 WUCSR_EEE_RX_WAKE_
|
3960 WUCSR_RFE_WAKE_FR_
|
3965 ret
= lan78xx_read_reg(dev
, MAC_TX
, &buf
);
3966 buf
|= MAC_TX_TXEN_
;
3967 ret
= lan78xx_write_reg(dev
, MAC_TX
, buf
);
/* Resume after a USB bus reset: bring the PHY back up, then run the
 * normal resume path.
 *
 * NOTE(review): this extraction elides one or more statements before
 * lan78xx_phy_init() (a device re-initialization step is expected
 * there) -- verify against the original file before relying on this
 * reconstruction.
 */
static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);

	lan78xx_phy_init(dev);

	return lan78xx_resume(intf);
}
3983 static const struct usb_device_id products
[] = {
3985 /* LAN7800 USB Gigabit Ethernet Device */
3986 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7800_USB_PRODUCT_ID
),
3989 /* LAN7850 USB Gigabit Ethernet Device */
3990 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7850_USB_PRODUCT_ID
),
3993 /* LAN7801 USB Gigabit Ethernet Device */
3994 USB_DEVICE(LAN78XX_USB_VENDOR_ID
, LAN7801_USB_PRODUCT_ID
),
3998 MODULE_DEVICE_TABLE(usb
, products
);
4000 static struct usb_driver lan78xx_driver
= {
4001 .name
= DRIVER_NAME
,
4002 .id_table
= products
,
4003 .probe
= lan78xx_probe
,
4004 .disconnect
= lan78xx_disconnect
,
4005 .suspend
= lan78xx_suspend
,
4006 .resume
= lan78xx_resume
,
4007 .reset_resume
= lan78xx_reset_resume
,
4008 .supports_autosuspend
= 1,
4009 .disable_hub_initiated_lpm
= 1,
4012 module_usb_driver(lan78xx_driver
);
4014 MODULE_AUTHOR(DRIVER_AUTHOR
);
4015 MODULE_DESCRIPTION(DRIVER_DESC
);
4016 MODULE_LICENSE("GPL");