Merge branch 'fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux...
[linux/fpc-iii.git] / drivers / net / usb / lan78xx.c
blobf20890ee03f33368fd68c6b5fb82f8fd76fa4310
1 /*
2 * Copyright (C) 2015 Microchip Technology
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
17 #include <linux/version.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/usb.h>
23 #include <linux/crc32.h>
24 #include <linux/signal.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/uaccess.h>
28 #include <linux/list.h>
29 #include <linux/ip.h>
30 #include <linux/ipv6.h>
31 #include <linux/mdio.h>
32 #include <net/ip6_checksum.h>
33 #include <linux/microchipphy.h>
34 #include "lan78xx.h"
/* Driver identification */
#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"
#define DRIVER_VERSION	"1.0.4"

/* Timeouts and throttling intervals */
#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

/* USB bulk packet sizes per bus speed (SuperSpeed/High/Full) */
#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

/* On-chip FIFO sizing and default burst/delay tuning */
#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)
#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)

/* Feature defaults applied at setup */
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define TX_OVERHEAD			(8)
#define RXW_PADDING			2

/* USB IDs and EEPROM/OTP magics used by the ethtool eeprom interface */
#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)

/* MII access direction for mii_access() */
#define MII_READ			1
#define MII_WRITE			0

/* EEPROM/OTP layout markers */
#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2

/* default autosuspend delay (mSec)*/
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)
/* ethtool statistics names; order must match the field order of
 * struct lan78xx_statstage (the two are copied as flat arrays).
 */
static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
	"Rx Fragment Errors",
	"RX Jabber Errors",
	"RX Undersize Frame Errors",
	"RX Oversize Frame Errors",
	"RX Dropped Frames",
	"RX Unicast Byte Count",
	"RX Broadcast Byte Count",
	"RX Multicast Byte Count",
	"RX Unicast Frames",
	"RX Broadcast Frames",
	"RX Multicast Frames",
	"RX Pause Frames",
	"RX 64 Byte Frames",
	"RX 65 - 127 Byte Frames",
	"RX 128 - 255 Byte Frames",
	"RX 256 - 511 Bytes Frames",
	"RX 512 - 1023 Byte Frames",
	"RX 1024 - 1518 Byte Frames",
	"RX Greater 1518 Byte Frames",
	"EEE RX LPI Transitions",
	"EEE RX LPI Time",
	"TX FCS Errors",
	"TX Excess Deferral Errors",
	"TX Carrier Errors",
	"TX Bad Byte Count",
	"TX Single Collisions",
	"TX Multiple Collisions",
	"TX Excessive Collision",
	"TX Late Collisions",
	"TX Unicast Byte Count",
	"TX Broadcast Byte Count",
	"TX Multicast Byte Count",
	"TX Unicast Frames",
	"TX Broadcast Frames",
	"TX Multicast Frames",
	"TX Pause Frames",
	"TX 64 Byte Frames",
	"TX 65 - 127 Byte Frames",
	"TX 128 - 255 Byte Frames",
	"TX 256 - 511 Bytes Frames",
	"TX 512 - 1023 Byte Frames",
	"TX 1024 - 1518 Byte Frames",
	"TX Greater 1518 Byte Frames",
	"EEE TX LPI Transitions",
	"EEE TX LPI Time",
};
/* Raw 32-bit hardware statistics block, read from the device as a flat
 * array of little-endian words (see lan78xx_read_stats); field order
 * must match lan78xx_gstrings and struct lan78xx_statstage64.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
/* 64-bit accumulated counters (rollover-corrected totals built in
 * lan78xx_update_stats); field order mirrors struct lan78xx_statstage.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
struct lan78xx_net;

/* Driver-private state hung off struct lan78xx_net (dev->data[0]);
 * holds the receive-filter configuration and the workqueue items that
 * flush it to hardware from sleepable context.
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl;
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol;
};
/* Lifecycle state of an skb/URB pair; stored in skb->cb via
 * struct skb_data.
 */
enum skb_state {
	illegal = 0,
	tx_start,
	tx_done,
	rx_start,
	rx_done,
	rx_cleanup,
	unlink_start
};
/* Per-skb bookkeeping kept in the skb control block. */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
};
/* Context carried by asynchronous USB control requests. */
struct usb_context {
	struct usb_ctrlrequest req;
	struct lan78xx_net *dev;
};
/* Bit numbers in dev->flags, set via lan78xx_defer_kevent() and
 * serviced by the deferred work handler.
 */
#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_LINK_RESET		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
/* Statistics bookkeeping: last raw snapshot, per-counter rollover
 * counts and maxima, and the accumulated 64-bit totals.
 */
struct statstage {
	struct mutex access_lock;	/* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};
/* Per-device state for one LAN78xx USB adapter. */
struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;
	void *driver_priv;		/* points at struct lan78xx_priv */

	int rx_qlen;
	int tx_qlen;
	struct sk_buff_head rxq;
	struct sk_buff_head txq;
	struct sk_buff_head done;
	struct sk_buff_head rxq_pause;
	struct sk_buff_head txq_pend;

	struct tasklet_struct bh;
	struct delayed_work wq;

	struct usb_host_endpoint *ep_blkin;
	struct usb_host_endpoint *ep_blkout;
	struct usb_host_endpoint *ep_intr;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex phy_mutex; /* for phy access */
	unsigned pipe_in, pipe_out, pipe_intr;

	u32 hard_mtu;	/* count any extra framing */
	size_t rx_urb_size;	/* size for rx urbs */

	unsigned long flags;	/* EVENT_* bits */

	wait_queue_head_t *wait;
	unsigned char suspend_count;

	unsigned maxpacket;
	struct timer_list delay;
	struct timer_list stat_monitor;	/* periodic EVENT_STAT_UPDATE */

	unsigned long data[5];	/* data[0] holds struct lan78xx_priv * */

	int link_on;
	u8 mdix_ctrl;

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;

	int fc_autoneg;
	u8 fc_request_control;

	int delta;	/* stat_monitor interval multiplier */
	struct statstage stats;
};
/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");
/* Read a 32-bit device register over the control pipe.
 * Stores the CPU-endian value in *data.  Returns the usb_control_msg()
 * result (>= 0 on success) or -ENOMEM.
 * NOTE(review): the one-word heap buffer is presumably required because
 * USB transfer buffers must be DMA-safe (not stack memory) -- confirm.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* device registers are little-endian on the wire */
		le32_to_cpus(buf);
		*data = *buf;
	} else {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %d",
			    index, ret);
	}

	kfree(buf);

	return ret;
}
386 static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
388 u32 *buf = kmalloc(sizeof(u32), GFP_KERNEL);
389 int ret;
391 if (!buf)
392 return -ENOMEM;
394 *buf = data;
395 cpu_to_le32s(buf);
397 ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
398 USB_VENDOR_REQUEST_WRITE_REGISTER,
399 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
400 0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
401 if (unlikely(ret < 0)) {
402 netdev_warn(dev->net,
403 "Failed to write register index 0x%08x. ret = %d",
404 index, ret);
407 kfree(buf);
409 return ret;
/* Fetch the full hardware statistics block with one vendor control
 * request and copy it into @data, converting each counter from
 * little-endian.  Returns the usb_control_msg() result (byte count on
 * success) or -ENOMEM.
 * NOTE(review): passes USB_CTRL_SET_TIMEOUT for an IN transfer --
 * probably harmless, but confirm USB_CTRL_GET_TIMEOUT wasn't intended.
 */
static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	/* heap buffer: used directly as the USB transfer buffer */
	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		/* byte-swap the block word-by-word into the caller's struct */
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats)/sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = 0x%x", ret);
	}

	kfree(stats);

	return ret;
}
/* Compare a freshly-read hardware counter against the previously saved
 * snapshot; a smaller current value means the 32-bit counter wrapped,
 * so bump that member's rollover count.
 * Wrapped in do { } while (0) (and with parenthesized arguments) so it
 * expands safely as a single statement, e.g. in an unbraced if/else --
 * the old bare-brace form would break such callers.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
/* Detect 32-bit wraparound for every hardware counter by comparing the
 * fresh reading against the last saved snapshot, then save @stats as
 * the new snapshot.  Called with dev->stats.access_lock held (see
 * lan78xx_update_stats).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	/* remember this reading for the next rollover comparison */
	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
/* Refresh dev->stats.curr_stat: read the hardware counters, account
 * for 32-bit rollovers, and rebuild each 64-bit total as
 * current + rollovers * (max + 1).  Silently does nothing if the
 * interface cannot be resumed.
 * NOTE(review): if lan78xx_read_stats() fails, lan78xx_stats is still
 * consumed uninitialized by the accumulation loop below -- confirm
 * whether the failure path should skip the update instead.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* view the stat structs as flat arrays; their layouts match */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
538 /* Loop until the read is completed with timeout called with phy_mutex held */
539 static int lan78xx_phy_wait_not_busy(struct lan78xx_net *dev)
541 unsigned long start_time = jiffies;
542 u32 val;
543 int ret;
545 do {
546 ret = lan78xx_read_reg(dev, MII_ACC, &val);
547 if (unlikely(ret < 0))
548 return -EIO;
550 if (!(val & MII_ACC_MII_BUSY_))
551 return 0;
552 } while (!time_after(jiffies, start_time + HZ));
554 return -EIO;
557 static inline u32 mii_access(int id, int index, int read)
559 u32 ret;
561 ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
562 ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
563 if (read)
564 ret |= MII_ACC_MII_READ_;
565 else
566 ret |= MII_ACC_MII_WRITE_;
567 ret |= MII_ACC_MII_BUSY_;
569 return ret;
/* Poll E2P_CMD until the current EEPROM operation finishes, the device
 * reports a timeout, or one second elapses.  Returns 0 on completion,
 * -EIO on register-read failure or timeout.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (unlikely(ret < 0))
			return -EIO;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	/* still busy, or the controller itself flagged a timeout */
	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -EIO;
	}

	return 0;
}
597 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
599 unsigned long start_time = jiffies;
600 u32 val;
601 int ret;
603 do {
604 ret = lan78xx_read_reg(dev, E2P_CMD, &val);
605 if (unlikely(ret < 0))
606 return -EIO;
608 if (!(val & E2P_CMD_EPC_BUSY_))
609 return 0;
611 usleep_range(40, 100);
612 } while (!time_after(jiffies, start_time + HZ));
614 netdev_warn(dev->net, "EEPROM is busy");
615 return -EIO;
618 static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
619 u32 length, u8 *data)
621 u32 val;
622 u32 saved;
623 int i, ret;
624 int retval;
626 /* depends on chip, some EEPROM pins are muxed with LED function.
627 * disable & restore LED function to access EEPROM.
629 ret = lan78xx_read_reg(dev, HW_CFG, &val);
630 saved = val;
631 if (dev->chipid == ID_REV_CHIP_ID_7800_) {
632 val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
633 ret = lan78xx_write_reg(dev, HW_CFG, val);
636 retval = lan78xx_eeprom_confirm_not_busy(dev);
637 if (retval)
638 return retval;
640 for (i = 0; i < length; i++) {
641 val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
642 val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
643 ret = lan78xx_write_reg(dev, E2P_CMD, val);
644 if (unlikely(ret < 0)) {
645 retval = -EIO;
646 goto exit;
649 retval = lan78xx_wait_eeprom(dev);
650 if (retval < 0)
651 goto exit;
653 ret = lan78xx_read_reg(dev, E2P_DATA, &val);
654 if (unlikely(ret < 0)) {
655 retval = -EIO;
656 goto exit;
659 data[i] = val & 0xFF;
660 offset++;
663 retval = 0;
664 exit:
665 if (dev->chipid == ID_REV_CHIP_ID_7800_)
666 ret = lan78xx_write_reg(dev, HW_CFG, saved);
668 return retval;
671 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
672 u32 length, u8 *data)
674 u8 sig;
675 int ret;
677 ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
678 if ((ret == 0) && (sig == EEPROM_INDICATOR))
679 ret = lan78xx_read_raw_eeprom(dev, offset, length, data);
680 else
681 ret = -EINVAL;
683 return ret;
/* Write @length bytes to the configuration EEPROM starting at @offset.
 * LED function is disabled on LAN7800 for the duration (pins are muxed)
 * and restored on every exit path.  Returns 0 on success or a negative
 * errno.
 */
static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;
	int retval;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
	}

	retval = lan78xx_eeprom_confirm_not_busy(dev);
	if (retval)
		goto exit;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (unlikely(ret < 0)) {
		retval = -EIO;
		goto exit;
	}

	retval = lan78xx_wait_eeprom(dev);
	if (retval < 0)
		goto exit;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0) {
			retval = -EIO;
			goto exit;
		}

		retval = lan78xx_wait_eeprom(dev);
		if (retval < 0)
			goto exit;

		offset++;
	}

	retval = 0;
exit:
	/* restore the saved LED configuration on LAN7800 */
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		ret = lan78xx_write_reg(dev, HW_CFG, saved);

	return retval;
}
/* Read @length bytes from the OTP array starting at raw @offset.
 * Powers the OTP block up if needed, then reads byte-by-byte via the
 * OTP command interface.  Returns 0 on success, -EIO on poll timeout.
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* load the 16-bit byte address split across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
/* Program @length bytes into the OTP array starting at raw @offset.
 * Powers the OTP block up if needed, selects BYTE program mode, then
 * programs and verifies one byte at a time.  Returns 0 on success,
 * -EIO on poll timeout.  (OTP programming is one-way.)
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	int ret;
	u32 buf;
	unsigned long timeout;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -EIO;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);

	for (i = 0; i < length; i++) {
		/* load the 16-bit byte address split across two registers */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -EIO;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
860 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
861 u32 length, u8 *data)
863 u8 sig;
864 int ret;
866 ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
868 if (ret == 0) {
869 if (sig == OTP_INDICATOR_1)
870 offset = offset;
871 else if (sig == OTP_INDICATOR_2)
872 offset += 0x100;
873 else
874 ret = -EINVAL;
875 ret = lan78xx_read_raw_otp(dev, offset, length, data);
878 return ret;
881 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
883 int i, ret;
885 for (i = 0; i < 100; i++) {
886 u32 dp_sel;
888 ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
889 if (unlikely(ret < 0))
890 return -EIO;
892 if (dp_sel & DP_SEL_DPRDY_)
893 return 0;
895 usleep_range(40, 100);
898 netdev_warn(dev->net, "lan78xx_dataport_wait_not_busy timed out");
900 return -EIO;
/* Write @length words from @buf into internal RAM @ram_select at
 * @addr via the dataport, waiting for readiness between words.
 * Serialized by pdata->dataport_mutex.
 * NOTE(review): returns 0 (success) when the interface cannot be
 * resumed -- confirm callers are fine with a silent no-op there.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 dp_sel;
	int i, ret;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return 0;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* select the target RAM block, preserving other DP_SEL bits */
	ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);

	dp_sel &= ~DP_SEL_RSEL_MASK_;
	dp_sel |= ram_select;
	ret = lan78xx_write_reg(dev, DP_SEL, dp_sel);

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto done;
	}

done:
	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
944 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
945 int index, u8 addr[ETH_ALEN])
947 u32 temp;
949 if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
950 temp = addr[3];
951 temp = addr[2] | (temp << 8);
952 temp = addr[1] | (temp << 8);
953 temp = addr[0] | (temp << 8);
954 pdata->pfilter_table[index][1] = temp;
955 temp = addr[5];
956 temp = addr[4] | (temp << 8);
957 temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
958 pdata->pfilter_table[index][0] = temp;
/* returns hash bit number for given MAC address:
 * bit index 0..511 taken from the top 9 bits of the Ethernet CRC.
 */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
/* Work handler: push the filter state prepared by lan78xx_set_multicast
 * (hash table, perfect filters, RFE control) to the hardware.  Runs in
 * sleepable context because the register writes block on USB.
 */
static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i;
	int ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	/* hash table lives in VLAN/DA RAM just after the VLAN table */
	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, DP_SEL_VHF_VLAN_LEN,
			       DP_SEL_VHF_HASH_LEN, pdata->mchash_table);

	/* slot 0 (own MAC) is skipped; clear HI first to invalidate the
	 * slot while its halves are being rewritten
	 */
	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
/* ndo_set_rx_mode handler: rebuild the receive-filter state (promisc /
 * allmulti flags, perfect filters, multicast hash) under the rfe_ctl
 * spinlock, then defer the actual register writes to a workqueue since
 * this may be called in atomic context.
 */
static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	/* start from a clean hash table and perfect-filter table */
	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;
	/* pfilter_table[0] has own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] =
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				/* overflow addresses go into the 512-bit hash */
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}
/* Program MAC flow control after link-up: resolve the pause capability
 * (from autoneg advertisement or the manual request), set FCT_FLOW
 * thresholds for the bus speed, then enable TX/RX pause in FLOW.
 * Always returns 0.
 */
static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
				      u16 lcladv, u16 rmtadv)
{
	u32 flow = 0, fct_flow = 0;
	int ret;
	u8 cap;

	if (dev->fc_autoneg)
		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	else
		cap = dev->fc_request_control;

	/* low 16 bits carry the pause-frame time (max, 0xFFFF) */
	if (cap & FLOW_CTRL_TX)
		flow |= (FLOW_CR_TX_FCEN_ | 0xFFFF);

	if (cap & FLOW_CTRL_RX)
		flow |= FLOW_CR_RX_FCEN_;

	/* FIFO on/off thresholds tuned per USB bus speed */
	if (dev->udev->speed == USB_SPEED_SUPER)
		fct_flow = 0x817;
	else if (dev->udev->speed == USB_SPEED_HIGH)
		fct_flow = 0x211;

	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));

	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);

	/* threshold value should be set before enabling flow */
	ret = lan78xx_write_reg(dev, FLOW, flow);

	return 0;
}
/* Handle a PHY link-state change (EVENT_LINK_RESET): clear interrupt
 * status, and on link-down reset the MAC / stop the stat timer; on
 * link-up tune USB link-power states for the negotiated speed, program
 * flow control, and (re)start the statistics timer.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	int ladv, radv, ret;
	u32 buf;

	/* clear PHY interrupt status */
	ret = phy_read(phydev, LAN88XX_INT_STS);
	if (unlikely(ret < 0))
		return -EIO;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
	if (unlikely(ret < 0))
		return -EIO;

	phy_read_status(phydev);

	if (!phydev->link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_read_reg(dev, MAC_CR, &buf);
		if (unlikely(ret < 0))
			return -EIO;
		buf |= MAC_CR_RST_;
		ret = lan78xx_write_reg(dev, MAC_CR, buf);
		if (unlikely(ret < 0))
			return -EIO;

		phy_mac_interrupt(phydev, 0);

		/* no stats to collect while the link is down */
		del_timer(&dev->stat_monitor);
	} else if (phydev->link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_gset(phydev, &ecmd);

		ret = phy_read(phydev, LAN88XX_INT_STS);

		if (dev->udev->speed == USB_SPEED_SUPER) {
			if (ethtool_cmd_speed(&ecmd) == 1000) {
				/* disable U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf &= ~USB_CFG1_DEV_U2_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
				/* enable U1 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			} else {
				/* enable U1 & U2 */
				ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
				buf |= USB_CFG1_DEV_U2_INIT_EN_;
				buf |= USB_CFG1_DEV_U1_INIT_EN_;
				ret = lan78xx_write_reg(dev, USB_CFG1, buf);
			}
		}

		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
		phy_mac_interrupt(phydev, 1);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}
	}

	return ret;
}
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	/* mark the EVENT_* bit first; the work handler reads dev->flags */
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1188 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1190 u32 intdata;
1192 if (urb->actual_length != 4) {
1193 netdev_warn(dev->net,
1194 "unexpected urb length %d", urb->actual_length);
1195 return;
1198 memcpy(&intdata, urb->transfer_buffer, 4);
1199 le32_to_cpus(&intdata);
1201 if (intdata & INT_ENP_PHY_INT) {
1202 netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1203 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1204 } else
1205 netdev_warn(dev->net,
1206 "unexpected interrupt: 0x%08x\n", intdata);
1209 static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
1211 return MAX_EEPROM_SIZE;
1214 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1215 struct ethtool_eeprom *ee, u8 *data)
1217 struct lan78xx_net *dev = netdev_priv(netdev);
1219 ee->magic = LAN78XX_EEPROM_MAGIC;
1221 return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1224 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1225 struct ethtool_eeprom *ee, u8 *data)
1227 struct lan78xx_net *dev = netdev_priv(netdev);
1229 /* Allow entire eeprom update only */
1230 if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
1231 (ee->offset == 0) &&
1232 (ee->len == 512) &&
1233 (data[0] == EEPROM_INDICATOR))
1234 return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1235 else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1236 (ee->offset == 0) &&
1237 (ee->len == 512) &&
1238 (data[0] == OTP_INDICATOR_1))
1239 return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1241 return -EINVAL;
1244 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1245 u8 *data)
1247 if (stringset == ETH_SS_STATS)
1248 memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1251 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1253 if (sset == ETH_SS_STATS)
1254 return ARRAY_SIZE(lan78xx_gstrings);
1255 else
1256 return -EOPNOTSUPP;
1259 static void lan78xx_get_stats(struct net_device *netdev,
1260 struct ethtool_stats *stats, u64 *data)
1262 struct lan78xx_net *dev = netdev_priv(netdev);
1264 lan78xx_update_stats(dev);
1266 mutex_lock(&dev->stats.access_lock);
1267 memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
1268 mutex_unlock(&dev->stats.access_lock);
1271 static void lan78xx_get_wol(struct net_device *netdev,
1272 struct ethtool_wolinfo *wol)
1274 struct lan78xx_net *dev = netdev_priv(netdev);
1275 int ret;
1276 u32 buf;
1277 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1279 if (usb_autopm_get_interface(dev->intf) < 0)
1280 return;
1282 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1283 if (unlikely(ret < 0)) {
1284 wol->supported = 0;
1285 wol->wolopts = 0;
1286 } else {
1287 if (buf & USB_CFG_RMT_WKP_) {
1288 wol->supported = WAKE_ALL;
1289 wol->wolopts = pdata->wol;
1290 } else {
1291 wol->supported = 0;
1292 wol->wolopts = 0;
1296 usb_autopm_put_interface(dev->intf);
1299 static int lan78xx_set_wol(struct net_device *netdev,
1300 struct ethtool_wolinfo *wol)
1302 struct lan78xx_net *dev = netdev_priv(netdev);
1303 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1304 int ret;
1306 ret = usb_autopm_get_interface(dev->intf);
1307 if (ret < 0)
1308 return ret;
1310 pdata->wol = 0;
1311 if (wol->wolopts & WAKE_UCAST)
1312 pdata->wol |= WAKE_UCAST;
1313 if (wol->wolopts & WAKE_MCAST)
1314 pdata->wol |= WAKE_MCAST;
1315 if (wol->wolopts & WAKE_BCAST)
1316 pdata->wol |= WAKE_BCAST;
1317 if (wol->wolopts & WAKE_MAGIC)
1318 pdata->wol |= WAKE_MAGIC;
1319 if (wol->wolopts & WAKE_PHY)
1320 pdata->wol |= WAKE_PHY;
1321 if (wol->wolopts & WAKE_ARP)
1322 pdata->wol |= WAKE_ARP;
1324 device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1326 phy_ethtool_set_wol(netdev->phydev, wol);
1328 usb_autopm_put_interface(dev->intf);
1330 return ret;
1333 static int lan78xx_get_eee(struct net_device *net, struct ethtool_eee *edata)
1335 struct lan78xx_net *dev = netdev_priv(net);
1336 struct phy_device *phydev = net->phydev;
1337 int ret;
1338 u32 buf;
1340 ret = usb_autopm_get_interface(dev->intf);
1341 if (ret < 0)
1342 return ret;
1344 ret = phy_ethtool_get_eee(phydev, edata);
1345 if (ret < 0)
1346 goto exit;
1348 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1349 if (buf & MAC_CR_EEE_EN_) {
1350 edata->eee_enabled = true;
1351 edata->eee_active = !!(edata->advertised &
1352 edata->lp_advertised);
1353 edata->tx_lpi_enabled = true;
1354 /* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1355 ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1356 edata->tx_lpi_timer = buf;
1357 } else {
1358 edata->eee_enabled = false;
1359 edata->eee_active = false;
1360 edata->tx_lpi_enabled = false;
1361 edata->tx_lpi_timer = 0;
1364 ret = 0;
1365 exit:
1366 usb_autopm_put_interface(dev->intf);
1368 return ret;
1371 static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1373 struct lan78xx_net *dev = netdev_priv(net);
1374 int ret;
1375 u32 buf;
1377 ret = usb_autopm_get_interface(dev->intf);
1378 if (ret < 0)
1379 return ret;
1381 if (edata->eee_enabled) {
1382 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1383 buf |= MAC_CR_EEE_EN_;
1384 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1386 phy_ethtool_set_eee(net->phydev, edata);
1388 buf = (u32)edata->tx_lpi_timer;
1389 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1390 } else {
1391 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1392 buf &= ~MAC_CR_EEE_EN_;
1393 ret = lan78xx_write_reg(dev, MAC_CR, buf);
1396 usb_autopm_put_interface(dev->intf);
1398 return 0;
1401 static u32 lan78xx_get_link(struct net_device *net)
1403 phy_read_status(net->phydev);
1405 return net->phydev->link;
1408 int lan78xx_nway_reset(struct net_device *net)
1410 return phy_start_aneg(net->phydev);
1413 static void lan78xx_get_drvinfo(struct net_device *net,
1414 struct ethtool_drvinfo *info)
1416 struct lan78xx_net *dev = netdev_priv(net);
1418 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
1419 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
1420 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
1423 static u32 lan78xx_get_msglevel(struct net_device *net)
1425 struct lan78xx_net *dev = netdev_priv(net);
1427 return dev->msg_enable;
1430 static void lan78xx_set_msglevel(struct net_device *net, u32 level)
1432 struct lan78xx_net *dev = netdev_priv(net);
1434 dev->msg_enable = level;
1437 static int lan78xx_get_mdix_status(struct net_device *net)
1439 struct phy_device *phydev = net->phydev;
1440 int buf;
1442 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_1);
1443 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1444 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS, LAN88XX_EXT_PAGE_SPACE_0);
1446 return buf;
1449 static void lan78xx_set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
1451 struct lan78xx_net *dev = netdev_priv(net);
1452 struct phy_device *phydev = net->phydev;
1453 int buf;
1455 if (mdix_ctrl == ETH_TP_MDI) {
1456 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1457 LAN88XX_EXT_PAGE_SPACE_1);
1458 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1459 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1460 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1461 buf | LAN88XX_EXT_MODE_CTRL_MDI_);
1462 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1463 LAN88XX_EXT_PAGE_SPACE_0);
1464 } else if (mdix_ctrl == ETH_TP_MDI_X) {
1465 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1466 LAN88XX_EXT_PAGE_SPACE_1);
1467 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1468 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1469 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1470 buf | LAN88XX_EXT_MODE_CTRL_MDI_X_);
1471 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1472 LAN88XX_EXT_PAGE_SPACE_0);
1473 } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
1474 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1475 LAN88XX_EXT_PAGE_SPACE_1);
1476 buf = phy_read(phydev, LAN88XX_EXT_MODE_CTRL);
1477 buf &= ~LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1478 phy_write(phydev, LAN88XX_EXT_MODE_CTRL,
1479 buf | LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_);
1480 phy_write(phydev, LAN88XX_EXT_PAGE_ACCESS,
1481 LAN88XX_EXT_PAGE_SPACE_0);
1483 dev->mdix_ctrl = mdix_ctrl;
1486 static int lan78xx_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
1488 struct lan78xx_net *dev = netdev_priv(net);
1489 struct phy_device *phydev = net->phydev;
1490 int ret;
1491 int buf;
1493 ret = usb_autopm_get_interface(dev->intf);
1494 if (ret < 0)
1495 return ret;
1497 ret = phy_ethtool_gset(phydev, cmd);
1499 buf = lan78xx_get_mdix_status(net);
1501 buf &= LAN88XX_EXT_MODE_CTRL_MDIX_MASK_;
1502 if (buf == LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_) {
1503 cmd->eth_tp_mdix = ETH_TP_MDI_AUTO;
1504 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
1505 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_) {
1506 cmd->eth_tp_mdix = ETH_TP_MDI;
1507 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI;
1508 } else if (buf == LAN88XX_EXT_MODE_CTRL_MDI_X_) {
1509 cmd->eth_tp_mdix = ETH_TP_MDI_X;
1510 cmd->eth_tp_mdix_ctrl = ETH_TP_MDI_X;
1513 usb_autopm_put_interface(dev->intf);
1515 return ret;
1518 static int lan78xx_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
1520 struct lan78xx_net *dev = netdev_priv(net);
1521 struct phy_device *phydev = net->phydev;
1522 int ret = 0;
1523 int temp;
1525 ret = usb_autopm_get_interface(dev->intf);
1526 if (ret < 0)
1527 return ret;
1529 if (dev->mdix_ctrl != cmd->eth_tp_mdix_ctrl) {
1530 lan78xx_set_mdix_status(net, cmd->eth_tp_mdix_ctrl);
1533 /* change speed & duplex */
1534 ret = phy_ethtool_sset(phydev, cmd);
1536 if (!cmd->autoneg) {
1537 /* force link down */
1538 temp = phy_read(phydev, MII_BMCR);
1539 phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
1540 mdelay(1);
1541 phy_write(phydev, MII_BMCR, temp);
1544 usb_autopm_put_interface(dev->intf);
1546 return ret;
1549 static void lan78xx_get_pause(struct net_device *net,
1550 struct ethtool_pauseparam *pause)
1552 struct lan78xx_net *dev = netdev_priv(net);
1553 struct phy_device *phydev = net->phydev;
1554 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1556 phy_ethtool_gset(phydev, &ecmd);
1558 pause->autoneg = dev->fc_autoneg;
1560 if (dev->fc_request_control & FLOW_CTRL_TX)
1561 pause->tx_pause = 1;
1563 if (dev->fc_request_control & FLOW_CTRL_RX)
1564 pause->rx_pause = 1;
1567 static int lan78xx_set_pause(struct net_device *net,
1568 struct ethtool_pauseparam *pause)
1570 struct lan78xx_net *dev = netdev_priv(net);
1571 struct phy_device *phydev = net->phydev;
1572 struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
1573 int ret;
1575 phy_ethtool_gset(phydev, &ecmd);
1577 if (pause->autoneg && !ecmd.autoneg) {
1578 ret = -EINVAL;
1579 goto exit;
1582 dev->fc_request_control = 0;
1583 if (pause->rx_pause)
1584 dev->fc_request_control |= FLOW_CTRL_RX;
1586 if (pause->tx_pause)
1587 dev->fc_request_control |= FLOW_CTRL_TX;
1589 if (ecmd.autoneg) {
1590 u32 mii_adv;
1592 ecmd.advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1593 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1594 ecmd.advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1595 phy_ethtool_sset(phydev, &ecmd);
1598 dev->fc_autoneg = pause->autoneg;
1600 ret = 0;
1601 exit:
1602 return ret;
1605 static const struct ethtool_ops lan78xx_ethtool_ops = {
1606 .get_link = lan78xx_get_link,
1607 .nway_reset = lan78xx_nway_reset,
1608 .get_drvinfo = lan78xx_get_drvinfo,
1609 .get_msglevel = lan78xx_get_msglevel,
1610 .set_msglevel = lan78xx_set_msglevel,
1611 .get_settings = lan78xx_get_settings,
1612 .set_settings = lan78xx_set_settings,
1613 .get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
1614 .get_eeprom = lan78xx_ethtool_get_eeprom,
1615 .set_eeprom = lan78xx_ethtool_set_eeprom,
1616 .get_ethtool_stats = lan78xx_get_stats,
1617 .get_sset_count = lan78xx_get_sset_count,
1618 .get_strings = lan78xx_get_strings,
1619 .get_wol = lan78xx_get_wol,
1620 .set_wol = lan78xx_set_wol,
1621 .get_eee = lan78xx_get_eee,
1622 .set_eee = lan78xx_set_eee,
1623 .get_pauseparam = lan78xx_get_pause,
1624 .set_pauseparam = lan78xx_set_pause,
1627 static int lan78xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
1629 if (!netif_running(netdev))
1630 return -EINVAL;
1632 return phy_mii_ioctl(netdev->phydev, rq, cmd);
1635 static void lan78xx_init_mac_address(struct lan78xx_net *dev)
1637 u32 addr_lo, addr_hi;
1638 int ret;
1639 u8 addr[6];
1641 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1642 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1644 addr[0] = addr_lo & 0xFF;
1645 addr[1] = (addr_lo >> 8) & 0xFF;
1646 addr[2] = (addr_lo >> 16) & 0xFF;
1647 addr[3] = (addr_lo >> 24) & 0xFF;
1648 addr[4] = addr_hi & 0xFF;
1649 addr[5] = (addr_hi >> 8) & 0xFF;
1651 if (!is_valid_ether_addr(addr)) {
1652 /* reading mac address from EEPROM or OTP */
1653 if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1654 addr) == 0) ||
1655 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
1656 addr) == 0)) {
1657 if (is_valid_ether_addr(addr)) {
1658 /* eeprom values are valid so use them */
1659 netif_dbg(dev, ifup, dev->net,
1660 "MAC address read from EEPROM");
1661 } else {
1662 /* generate random MAC */
1663 random_ether_addr(addr);
1664 netif_dbg(dev, ifup, dev->net,
1665 "MAC address set to random addr");
1668 addr_lo = addr[0] | (addr[1] << 8) |
1669 (addr[2] << 16) | (addr[3] << 24);
1670 addr_hi = addr[4] | (addr[5] << 8);
1672 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1673 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1674 } else {
1675 /* generate random MAC */
1676 random_ether_addr(addr);
1677 netif_dbg(dev, ifup, dev->net,
1678 "MAC address set to random addr");
1682 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1683 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1685 ether_addr_copy(dev->net->dev_addr, addr);
1688 /* MDIO read and write wrappers for phylib */
1689 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1691 struct lan78xx_net *dev = bus->priv;
1692 u32 val, addr;
1693 int ret;
1695 ret = usb_autopm_get_interface(dev->intf);
1696 if (ret < 0)
1697 return ret;
1699 mutex_lock(&dev->phy_mutex);
1701 /* confirm MII not busy */
1702 ret = lan78xx_phy_wait_not_busy(dev);
1703 if (ret < 0)
1704 goto done;
1706 /* set the address, index & direction (read from PHY) */
1707 addr = mii_access(phy_id, idx, MII_READ);
1708 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1710 ret = lan78xx_phy_wait_not_busy(dev);
1711 if (ret < 0)
1712 goto done;
1714 ret = lan78xx_read_reg(dev, MII_DATA, &val);
1716 ret = (int)(val & 0xFFFF);
1718 done:
1719 mutex_unlock(&dev->phy_mutex);
1720 usb_autopm_put_interface(dev->intf);
1721 return ret;
1724 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
1725 u16 regval)
1727 struct lan78xx_net *dev = bus->priv;
1728 u32 val, addr;
1729 int ret;
1731 ret = usb_autopm_get_interface(dev->intf);
1732 if (ret < 0)
1733 return ret;
1735 mutex_lock(&dev->phy_mutex);
1737 /* confirm MII not busy */
1738 ret = lan78xx_phy_wait_not_busy(dev);
1739 if (ret < 0)
1740 goto done;
1742 val = (u32)regval;
1743 ret = lan78xx_write_reg(dev, MII_DATA, val);
1745 /* set the address, index & direction (write to PHY) */
1746 addr = mii_access(phy_id, idx, MII_WRITE);
1747 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1749 ret = lan78xx_phy_wait_not_busy(dev);
1750 if (ret < 0)
1751 goto done;
1753 done:
1754 mutex_unlock(&dev->phy_mutex);
1755 usb_autopm_put_interface(dev->intf);
1756 return 0;
1759 static int lan78xx_mdio_init(struct lan78xx_net *dev)
1761 int ret;
1763 dev->mdiobus = mdiobus_alloc();
1764 if (!dev->mdiobus) {
1765 netdev_err(dev->net, "can't allocate MDIO bus\n");
1766 return -ENOMEM;
1769 dev->mdiobus->priv = (void *)dev;
1770 dev->mdiobus->read = lan78xx_mdiobus_read;
1771 dev->mdiobus->write = lan78xx_mdiobus_write;
1772 dev->mdiobus->name = "lan78xx-mdiobus";
1774 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
1775 dev->udev->bus->busnum, dev->udev->devnum);
1777 switch (dev->chipid) {
1778 case ID_REV_CHIP_ID_7800_:
1779 case ID_REV_CHIP_ID_7850_:
1780 /* set to internal PHY id */
1781 dev->mdiobus->phy_mask = ~(1 << 1);
1782 break;
1785 ret = mdiobus_register(dev->mdiobus);
1786 if (ret) {
1787 netdev_err(dev->net, "can't register MDIO bus\n");
1788 goto exit1;
1791 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
1792 return 0;
1793 exit1:
1794 mdiobus_free(dev->mdiobus);
1795 return ret;
1798 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
1800 mdiobus_unregister(dev->mdiobus);
1801 mdiobus_free(dev->mdiobus);
/* phylib link-change callback; link handling happens in the driver's own
 * interrupt path, so nothing is required here.
 */
static void lan78xx_link_status_change(struct net_device *net)
{
	/* nothing to do */
}
1809 static int lan78xx_phy_init(struct lan78xx_net *dev)
1811 int ret;
1812 u32 mii_adv;
1813 struct phy_device *phydev = dev->net->phydev;
1815 phydev = phy_find_first(dev->mdiobus);
1816 if (!phydev) {
1817 netdev_err(dev->net, "no PHY found\n");
1818 return -EIO;
1821 /* Enable PHY interrupts.
1822 * We handle our own interrupt
1824 ret = phy_read(phydev, LAN88XX_INT_STS);
1825 ret = phy_write(phydev, LAN88XX_INT_MASK,
1826 LAN88XX_INT_MASK_MDINTPIN_EN_ |
1827 LAN88XX_INT_MASK_LINK_CHANGE_);
1829 phydev->irq = PHY_IGNORE_INTERRUPT;
1831 ret = phy_connect_direct(dev->net, phydev,
1832 lan78xx_link_status_change,
1833 PHY_INTERFACE_MODE_GMII);
1834 if (ret) {
1835 netdev_err(dev->net, "can't attach PHY to %s\n",
1836 dev->mdiobus->id);
1837 return -EIO;
1840 /* set to AUTOMDIX */
1841 lan78xx_set_mdix_status(dev->net, ETH_TP_MDI_AUTO);
1843 /* MAC doesn't support 1000T Half */
1844 phydev->supported &= ~SUPPORTED_1000baseT_Half;
1846 /* support both flow controls */
1847 dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
1848 phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
1849 mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
1850 phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv);
1852 genphy_config_aneg(phydev);
1854 dev->fc_autoneg = phydev->autoneg;
1856 phy_start(phydev);
1858 netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
1860 return 0;
1863 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
1865 int ret = 0;
1866 u32 buf;
1867 bool rxenabled;
1869 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
1871 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
1873 if (rxenabled) {
1874 buf &= ~MAC_RX_RXEN_;
1875 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1878 /* add 4 to size for FCS */
1879 buf &= ~MAC_RX_MAX_SIZE_MASK_;
1880 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
1882 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1884 if (rxenabled) {
1885 buf |= MAC_RX_RXEN_;
1886 ret = lan78xx_write_reg(dev, MAC_RX, buf);
1889 return 0;
1892 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
/* Asynchronously unlink every URB queued on @q that is not already being
 * unlinked; returns the number of unlink requests successfully issued.
 * The queue lock is dropped around usb_unlink_urb() because completion
 * handlers take the same lock.
 */
1894 struct sk_buff *skb;
1895 unsigned long flags;
1896 int count = 0;
1898 spin_lock_irqsave(&q->lock, flags);
1899 while (!skb_queue_empty(q)) {
1900 struct skb_data *entry;
1901 struct urb *urb;
1902 int ret;
/* find the first entry whose unlink has not been started yet;
 * if all entries are already unlinking, we are done
 */
1904 skb_queue_walk(q, skb) {
1905 entry = (struct skb_data *)skb->cb;
1906 if (entry->state != unlink_start)
1907 goto found;
1909 break;
1910 found:
1911 entry->state = unlink_start;
1912 urb = entry->urb;
1914 /* Get reference count of the URB to avoid it to be
1915 * freed during usb_unlink_urb, which may trigger
1916 * use-after-free problem inside usb_unlink_urb since
1917 * usb_unlink_urb is always racing with .complete
1918 * handler(include defer_bh).
1920 usb_get_urb(urb);
1921 spin_unlock_irqrestore(&q->lock, flags);
1922 /* during some PM-driven resume scenarios,
1923 * these (async) unlinks complete immediately
1925 ret = usb_unlink_urb(urb);
1926 if (ret != -EINPROGRESS && ret != 0)
1927 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
1928 else
1929 count++;
1930 usb_put_urb(urb);
1931 spin_lock_irqsave(&q->lock, flags);
1933 spin_unlock_irqrestore(&q->lock, flags);
1934 return count;
1937 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
1939 struct lan78xx_net *dev = netdev_priv(netdev);
1940 int ll_mtu = new_mtu + netdev->hard_header_len;
1941 int old_hard_mtu = dev->hard_mtu;
1942 int old_rx_urb_size = dev->rx_urb_size;
1943 int ret;
1945 if (new_mtu > MAX_SINGLE_PACKET_SIZE)
1946 return -EINVAL;
1948 if (new_mtu <= 0)
1949 return -EINVAL;
1950 /* no second zero-length packet read wanted after mtu-sized packets */
1951 if ((ll_mtu % dev->maxpacket) == 0)
1952 return -EDOM;
1954 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
1956 netdev->mtu = new_mtu;
1958 dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
1959 if (dev->rx_urb_size == old_hard_mtu) {
1960 dev->rx_urb_size = dev->hard_mtu;
1961 if (dev->rx_urb_size > old_rx_urb_size) {
1962 if (netif_running(dev->net)) {
1963 unlink_urbs(dev, &dev->rxq);
1964 tasklet_schedule(&dev->bh);
1969 return 0;
1972 int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
1974 struct lan78xx_net *dev = netdev_priv(netdev);
1975 struct sockaddr *addr = p;
1976 u32 addr_lo, addr_hi;
1977 int ret;
1979 if (netif_running(netdev))
1980 return -EBUSY;
1982 if (!is_valid_ether_addr(addr->sa_data))
1983 return -EADDRNOTAVAIL;
1985 ether_addr_copy(netdev->dev_addr, addr->sa_data);
1987 addr_lo = netdev->dev_addr[0] |
1988 netdev->dev_addr[1] << 8 |
1989 netdev->dev_addr[2] << 16 |
1990 netdev->dev_addr[3] << 24;
1991 addr_hi = netdev->dev_addr[4] |
1992 netdev->dev_addr[5] << 8;
1994 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1995 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1997 return 0;
2000 /* Enable or disable Rx checksum offload engine */
2001 static int lan78xx_set_features(struct net_device *netdev,
2002 netdev_features_t features)
2004 struct lan78xx_net *dev = netdev_priv(netdev);
2005 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2006 unsigned long flags;
2007 int ret;
2009 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
2011 if (features & NETIF_F_RXCSUM) {
2012 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
2013 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
2014 } else {
2015 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
2016 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
2019 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2020 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2021 else
2022 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
2024 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
2026 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2028 return 0;
2031 static void lan78xx_deferred_vlan_write(struct work_struct *param)
2033 struct lan78xx_priv *pdata =
2034 container_of(param, struct lan78xx_priv, set_vlan);
2035 struct lan78xx_net *dev = pdata->dev;
2037 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
2038 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
2041 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
2042 __be16 proto, u16 vid)
2044 struct lan78xx_net *dev = netdev_priv(netdev);
2045 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2046 u16 vid_bit_index;
2047 u16 vid_dword_index;
2049 vid_dword_index = (vid >> 5) & 0x7F;
2050 vid_bit_index = vid & 0x1F;
2052 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
2054 /* defer register writes to a sleepable context */
2055 schedule_work(&pdata->set_vlan);
2057 return 0;
2060 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
2061 __be16 proto, u16 vid)
2063 struct lan78xx_net *dev = netdev_priv(netdev);
2064 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2065 u16 vid_bit_index;
2066 u16 vid_dword_index;
2068 vid_dword_index = (vid >> 5) & 0x7F;
2069 vid_bit_index = vid & 0x1F;
2071 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
2073 /* defer register writes to a sleepable context */
2074 schedule_work(&pdata->set_vlan);
2076 return 0;
2079 static void lan78xx_init_ltm(struct lan78xx_net *dev)
/* Program the USB Latency Tolerance Messaging (LTM) registers.  When LTM
 * is enabled in USB_CFG1, values are taken from EEPROM (or OTP as
 * fallback) if a valid block descriptor is found at offset 0x3F;
 * otherwise all six registers are cleared to zero.
 */
2081 int ret;
2082 u32 buf;
2083 u32 regs[6] = { 0 };
2085 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
2086 if (buf & USB_CFG1_LTM_ENABLE_) {
2087 u8 temp[2];
2088 /* Get values from EEPROM first */
2089 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
/* temp[0] is the block length (24 bytes = six u32 registers),
 * temp[1] is a word offset into the EEPROM
 */
2090 if (temp[0] == 24) {
2091 ret = lan78xx_read_raw_eeprom(dev,
2092 temp[1] * 2,
2094 (u8 *)regs);
2095 if (ret < 0)
2096 return;
2098 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
2099 if (temp[0] == 24) {
2100 ret = lan78xx_read_raw_otp(dev,
2101 temp[1] * 2,
2103 (u8 *)regs);
2104 if (ret < 0)
2105 return;
/* either EEPROM/OTP values or the zeroed defaults end up here */
2110 lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
2111 lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
2112 lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
2113 lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
2114 lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
2115 lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
2118 static int lan78xx_reset(struct lan78xx_net *dev)
/* Full device initialization: lite reset, MAC address setup, USB/LTM
 * configuration, FIFO sizing, RFE/checksum setup, PHY reset and finally
 * enabling of the TX/RX paths.  Returns 0 on success or -EIO when a
 * reset fails to complete within one second.  The register sequence is
 * order-sensitive; do not reorder.
 */
2120 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2121 u32 buf;
2122 int ret = 0;
2123 unsigned long timeout;
/* issue a "lite reset" and poll for its self-clearing bit */
2125 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2126 buf |= HW_CFG_LRST_;
2127 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2129 timeout = jiffies + HZ;
2130 do {
2131 mdelay(1);
2132 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2133 if (time_after(jiffies, timeout)) {
2134 netdev_warn(dev->net,
2135 "timeout on completion of LiteReset");
2136 return -EIO;
2138 } while (buf & HW_CFG_LRST_);
2140 lan78xx_init_mac_address(dev);
2142 /* save DEVID for later usage */
2143 ret = lan78xx_read_reg(dev, ID_REV, &buf);
2144 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
2145 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
2147 /* Respond to the IN token with a NAK */
2148 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2149 buf |= USB_CFG_BIR_;
2150 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2152 /* Init LTM */
2153 lan78xx_init_ltm(dev);
2155 dev->net->hard_header_len += TX_OVERHEAD;
2156 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
/* size the bulk-in burst and queue depths for the negotiated USB speed */
2158 if (dev->udev->speed == USB_SPEED_SUPER) {
2159 buf = DEFAULT_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
2160 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2161 dev->rx_qlen = 4;
2162 dev->tx_qlen = 4;
2163 } else if (dev->udev->speed == USB_SPEED_HIGH) {
2164 buf = DEFAULT_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
2165 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2166 dev->rx_qlen = RX_MAX_QUEUE_MEMORY / dev->rx_urb_size;
2167 dev->tx_qlen = RX_MAX_QUEUE_MEMORY / dev->hard_mtu;
2168 } else {
2169 buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
2170 dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
2171 dev->rx_qlen = 4;
2174 ret = lan78xx_write_reg(dev, BURST_CAP, buf);
2175 ret = lan78xx_write_reg(dev, BULK_IN_DLY, DEFAULT_BULK_IN_DELAY);
/* enable multiple ethernet frames per USB transfer */
2177 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
2178 buf |= HW_CFG_MEF_;
2179 ret = lan78xx_write_reg(dev, HW_CFG, buf);
2181 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
2182 buf |= USB_CFG_BCE_;
2183 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
2185 /* set FIFO sizes */
2186 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
2187 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
2189 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
2190 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
/* clear stale interrupts and disable flow control until link-up */
2192 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
2193 ret = lan78xx_write_reg(dev, FLOW, 0);
2194 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
2196 /* Don't need rfe_ctl_lock during initialisation */
2197 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
2198 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
2199 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
2201 /* Enable or disable checksum offload engines */
2202 lan78xx_set_features(dev->net, dev->net->features);
2204 lan78xx_set_multicast(dev->net);
2206 /* reset PHY */
2207 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2208 buf |= PMT_CTL_PHY_RST_;
2209 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
2211 timeout = jiffies + HZ;
2212 do {
2213 mdelay(1);
2214 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
2215 if (time_after(jiffies, timeout)) {
2216 netdev_warn(dev->net, "timeout waiting for PHY Reset");
2217 return -EIO;
2219 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
2221 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
2222 buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
2223 ret = lan78xx_write_reg(dev, MAC_CR, buf);
2225 /* enable PHY interrupts */
2226 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2227 buf |= INT_ENP_PHY_INT;
2228 ret = lan78xx_write_reg(dev, INT_EP_CTL, buf);
/* bring up the transmit path (MAC then FIFO controller) */
2230 ret = lan78xx_read_reg(dev, MAC_TX, &buf);
2231 buf |= MAC_TX_TXEN_;
2232 ret = lan78xx_write_reg(dev, MAC_TX, buf);
2234 ret = lan78xx_read_reg(dev, FCT_TX_CTL, &buf);
2235 buf |= FCT_TX_CTL_EN_;
2236 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2238 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
/* bring up the receive path (MAC then FIFO controller) */
2240 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2241 buf |= MAC_RX_RXEN_;
2242 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2244 ret = lan78xx_read_reg(dev, FCT_RX_CTL, &buf);
2245 buf |= FCT_RX_CTL_EN_;
2246 ret = lan78xx_write_reg(dev, FCT_RX_CTL, buf);
2248 return 0;
2251 static void lan78xx_init_stats(struct lan78xx_net *dev)
2253 u32 *p;
2254 int i;
2256 /* initialize for stats update
2257 * some counters are 20bits and some are 32bits
2259 p = (u32 *)&dev->stats.rollover_max;
2260 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
2261 p[i] = 0xFFFFF;
2263 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
2264 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
2265 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
2266 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
2267 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
2268 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
2269 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
2270 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
2271 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
2272 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
2274 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
2277 static int lan78xx_open(struct net_device *net)
/* ndo_open: bring the interface up — chip reset, PHY init, interrupt
 * URB submission, stats setup, then kick a deferred link check.
 */
2279 struct lan78xx_net *dev = netdev_priv(net);
2280 int ret;
2282 ret = usb_autopm_get_interface(dev->intf);
2283 if (ret < 0)
2284 goto out;
2286 ret = lan78xx_reset(dev);
2287 if (ret < 0)
2288 goto done;
2290 ret = lan78xx_phy_init(dev);
2291 if (ret < 0)
2292 goto done;
2294 /* for Link Check */
2295 if (dev->urb_intr) {
2296 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
2297 if (ret < 0) {
2298 netif_err(dev, ifup, dev->net,
2299 "intr submit %d\n", ret);
2300 goto done;
2304 lan78xx_init_stats(dev);
2306 set_bit(EVENT_DEV_OPEN, &dev->flags);
2308 netif_start_queue(net);
2310 dev->link_on = false;
2312 lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
2313 done:
/* NOTE(review): the success path also falls through to this autopm put,
 * while lan78xx_stop() performs another put — verify the overall
 * get/put pairing against probe/suspend handling elsewhere in the file.
 */
2314 usb_autopm_put_interface(dev->intf);
2316 out:
2317 return ret;
2320 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
2322 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
2323 DECLARE_WAITQUEUE(wait, current);
2324 int temp;
2326 /* ensure there are no more active urbs */
2327 add_wait_queue(&unlink_wakeup, &wait);
2328 set_current_state(TASK_UNINTERRUPTIBLE);
2329 dev->wait = &unlink_wakeup;
2330 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
2332 /* maybe wait for deletions to finish. */
2333 while (!skb_queue_empty(&dev->rxq) &&
2334 !skb_queue_empty(&dev->txq) &&
2335 !skb_queue_empty(&dev->done)) {
2336 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
2337 set_current_state(TASK_UNINTERRUPTIBLE);
2338 netif_dbg(dev, ifdown, dev->net,
2339 "waited for %d urb completions\n", temp);
2341 set_current_state(TASK_RUNNING);
2342 dev->wait = NULL;
2343 remove_wait_queue(&unlink_wakeup, &wait);
/* ndo_stop: tear down the interface.
 * Order matters: stop/disconnect the PHY first, mark the device closed,
 * drain all URBs, then neutralize deferred work by clearing dev->flags
 * before cancelling the work item and killing the tasklet.
 */
int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	if (timer_pending(&dev->stat_monitor))
		del_timer_sync(&dev->stat_monitor);

	phy_stop(net->phydev);
	phy_disconnect(net->phydev);
	net->phydev = NULL;

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	lan78xx_terminate_urbs(dev);

	usb_kill_urb(dev->urb_intr);

	skb_queue_purge(&dev->rxq_pause);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	dev->flags = 0;
	cancel_delayed_work_sync(&dev->wq);
	tasklet_kill(&dev->bh);

	/* balances the autopm get taken in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	return 0;
}
/* Make the skb data contiguous: the bulk-out URB built in lan78xx_tx_bh()
 * points at a single linear buffer, so fragments must be flattened first.
 * Returns 0 on success or a negative errno from skb_linearize().
 */
static int lan78xx_linearize(struct sk_buff *skb)
{
	return skb_linearize(skb);
}
2389 static struct sk_buff *lan78xx_tx_prep(struct lan78xx_net *dev,
2390 struct sk_buff *skb, gfp_t flags)
2392 u32 tx_cmd_a, tx_cmd_b;
2394 if (skb_headroom(skb) < TX_OVERHEAD) {
2395 struct sk_buff *skb2;
2397 skb2 = skb_copy_expand(skb, TX_OVERHEAD, 0, flags);
2398 dev_kfree_skb_any(skb);
2399 skb = skb2;
2400 if (!skb)
2401 return NULL;
2404 if (lan78xx_linearize(skb) < 0)
2405 return NULL;
2407 tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
2409 if (skb->ip_summed == CHECKSUM_PARTIAL)
2410 tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
2412 tx_cmd_b = 0;
2413 if (skb_is_gso(skb)) {
2414 u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
2416 tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
2418 tx_cmd_a |= TX_CMD_A_LSO_;
2421 if (skb_vlan_tag_present(skb)) {
2422 tx_cmd_a |= TX_CMD_A_IVTG_;
2423 tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
2426 skb_push(skb, 4);
2427 cpu_to_le32s(&tx_cmd_b);
2428 memcpy(skb->data, &tx_cmd_b, 4);
2430 skb_push(skb, 4);
2431 cpu_to_le32s(&tx_cmd_a);
2432 memcpy(skb->data, &tx_cmd_a, 4);
2434 return skb;
/* Move an skb from an active queue (rxq/txq) onto dev->done for the bh
 * tasklet, recording the new state and returning the previous one.
 * Lock handoff is deliberate: irqs are disabled once via the first
 * spin_lock_irqsave(), the source-list lock is dropped with plain
 * spin_unlock(), and the saved flags are restored only when the done-list
 * lock is released.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->done.lock);

	__skb_queue_tail(&dev->done, skb);
	/* schedule the tasklet only on the empty->non-empty transition */
	if (skb_queue_len(&dev->done) == 1)
		tasklet_schedule(&dev->bh);
	spin_unlock_irqrestore(&dev->done.lock, flags);

	return old_state;
}
/* Bulk-out URB completion (interrupt context).
 * Updates TX statistics, triggers recovery for endpoint stalls, drops the
 * async PM reference taken at submit time and hands the skb to the bh
 * tasklet via the done queue.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		dev->net->stats.tx_packets++;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* transient bus errors: stop feeding the queue */
			netif_stop_queue(dev->net);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err %d\n", entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	defer_bh(dev, skb, &dev->txq, tx_done);
}
/* Append newsk to list and tag its control-block state.
 * Caller must hold the list lock (uses the unlocked __skb_queue_tail).
 */
static void lan78xx_queue_skb(struct sk_buff_head *list,
			      struct sk_buff *newsk, enum skb_state state)
{
	struct skb_data *entry = (struct skb_data *)newsk->cb;

	__skb_queue_tail(list, newsk);
	entry->state = state;
}
/* ndo_start_xmit: prepend the TX command words and park the frame on
 * txq_pend; the bh tasklet batches pending frames into a single bulk URB.
 * Always returns NETDEV_TX_OK — failed prep is accounted as a drop.
 */
netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	struct sk_buff *skb2 = NULL;

	if (skb) {
		skb_tx_timestamp(skb);
		/* consumes skb; returns a (possibly different) prepared skb */
		skb2 = lan78xx_tx_prep(dev, skb, GFP_ATOMIC);
	}

	if (skb2) {
		skb_queue_tail(&dev->txq_pend, skb2);

		/* throttle TX patch at slower than SUPER SPEED USB */
		if ((dev->udev->speed < USB_SPEED_SUPER) &&
		    (skb_queue_len(&dev->txq_pend) > 10))
			netif_stop_queue(net);
	} else {
		netif_dbg(dev, tx_err, dev->net,
			  "lan78xx_tx_prep return NULL\n");
		dev->net->stats.tx_errors++;
		dev->net->stats.tx_dropped++;
	}

	tasklet_schedule(&dev->bh);

	return NETDEV_TX_OK;
}
/* Scan the interface's altsettings for the bulk-in, bulk-out and
 * (optional) interrupt-in endpoints, and record the bulk pipes.
 * Returns 0 on success, -EINVAL if no altsetting provides both bulk
 * endpoints.  The interrupt endpoint is optional (dev->ep_intr may be
 * left NULL).
 */
int lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
{
	int tmp;
	struct usb_host_interface *alt = NULL;
	struct usb_host_endpoint *in = NULL, *out = NULL;
	struct usb_host_endpoint *status = NULL;

	for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
		unsigned ep;

		in = NULL;
		out = NULL;
		status = NULL;
		alt = intf->altsetting + tmp;

		for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
			struct usb_host_endpoint *e;
			int intr = 0;

			e = alt->endpoint + ep;
			switch (e->desc.bmAttributes) {
			case USB_ENDPOINT_XFER_INT:
				/* only interrupt-IN is interesting */
				if (!usb_endpoint_dir_in(&e->desc))
					continue;
				intr = 1;
				/* FALLTHROUGH */
			case USB_ENDPOINT_XFER_BULK:
				break;
			default:
				continue;
			}
			if (usb_endpoint_dir_in(&e->desc)) {
				if (!intr && !in)
					in = e;
				else if (intr && !status)
					status = e;
			} else {
				if (!out)
					out = e;
			}
		}
		if (in && out)
			break;
	}
	if (!alt || !in || !out)
		return -EINVAL;

	dev->pipe_in = usb_rcvbulkpipe(dev->udev,
				       in->desc.bEndpointAddress &
				       USB_ENDPOINT_NUMBER_MASK);
	dev->pipe_out = usb_sndbulkpipe(dev->udev,
					out->desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK);
	dev->ep_intr = status;

	return 0;
}
2595 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2597 struct lan78xx_priv *pdata = NULL;
2598 int ret;
2599 int i;
2601 ret = lan78xx_get_endpoints(dev, intf);
2603 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
2605 pdata = (struct lan78xx_priv *)(dev->data[0]);
2606 if (!pdata) {
2607 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
2608 return -ENOMEM;
2611 pdata->dev = dev;
2613 spin_lock_init(&pdata->rfe_ctl_lock);
2614 mutex_init(&pdata->dataport_mutex);
2616 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
2618 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
2619 pdata->vlan_table[i] = 0;
2621 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
2623 dev->net->features = 0;
2625 if (DEFAULT_TX_CSUM_ENABLE)
2626 dev->net->features |= NETIF_F_HW_CSUM;
2628 if (DEFAULT_RX_CSUM_ENABLE)
2629 dev->net->features |= NETIF_F_RXCSUM;
2631 if (DEFAULT_TSO_CSUM_ENABLE)
2632 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2634 dev->net->hw_features = dev->net->features;
2636 /* Init all registers */
2637 ret = lan78xx_reset(dev);
2639 lan78xx_mdio_init(dev);
2641 dev->net->flags |= IFF_MULTICAST;
2643 pdata->wol = WAKE_MAGIC;
2645 return 0;
2648 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
2650 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
2652 lan78xx_remove_mdio(dev);
2654 if (pdata) {
2655 netif_dbg(dev, ifdown, dev->net, "free pdata");
2656 kfree(pdata);
2657 pdata = NULL;
2658 dev->data[0] = 0;
2662 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
2663 struct sk_buff *skb,
2664 u32 rx_cmd_a, u32 rx_cmd_b)
2666 if (!(dev->net->features & NETIF_F_RXCSUM) ||
2667 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
2668 skb->ip_summed = CHECKSUM_NONE;
2669 } else {
2670 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
2671 skb->ip_summed = CHECKSUM_COMPLETE;
/* Deliver a received frame to the network stack (or park it on rxq_pause
 * while RX is paused).  Consumes the skb in all paths.
 */
void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	int status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans(skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* the skb leaves driver ownership; scrub our control-block state */
	memset(skb->cb, 0, sizeof(struct skb_data));

	/* hardware RX timestamping may take the skb for deferred delivery */
	if (skb_defer_rx_timestamp(skb))
		return;

	status = netif_rx(skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
}
/* Parse one bulk-in URB buffer that may carry several frames, each
 * preceded by a 10-byte header (rx_cmd_a/b/c) and padded to 4 bytes.
 * Each complete frame is cloned and passed up; the final frame reuses
 * the URB skb itself.  Returns 1 on success, 0 on error (caller counts
 * rx_errors and recycles the skb).
 * NOTE(review): 'size' comes straight from the device and is not
 * validated against skb->len before the clone/trim — assumes well-formed
 * hardware framing; confirm against later upstream hardening.
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
{
	if (skb->len < dev->net->hard_header_len)
		return 0;

	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		struct sk_buff *skb2;
		unsigned char *packet;

		memcpy(&rx_cmd_a, skb->data, sizeof(rx_cmd_a));
		le32_to_cpus(&rx_cmd_a);
		skb_pull(skb, sizeof(rx_cmd_a));

		memcpy(&rx_cmd_b, skb->data, sizeof(rx_cmd_b));
		le32_to_cpus(&rx_cmd_b);
		skb_pull(skb, sizeof(rx_cmd_b));

		memcpy(&rx_cmd_c, skb->data, sizeof(rx_cmd_c));
		le16_to_cpus(&rx_cmd_c);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* receive-error flag set: skip this frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			/* last frame in this batch */
			if (skb->len == size) {
				lan78xx_rx_csum_offload(dev, skb,
							rx_cmd_a, rx_cmd_b);

				skb_trim(skb, skb->len - 4); /* remove fcs */
				skb->truesize = size + sizeof(struct sk_buff);

				return 1;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				netdev_warn(dev->net, "Error allocating skb");
				return 0;
			}

			/* point the clone at just this frame's bytes */
			skb2->len = size;
			skb2->data = packet;
			skb_set_tail_pointer(skb2, size);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			skb_trim(skb2, skb2->len - 4); /* remove fcs */
			skb2->truesize = size + sizeof(struct sk_buff);

			lan78xx_skb_return(dev, skb2);
		}

		skb_pull(skb, size);

		/* padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
2773 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb)
2775 if (!lan78xx_rx(dev, skb)) {
2776 dev->net->stats.rx_errors++;
2777 goto done;
2780 if (skb->len) {
2781 lan78xx_skb_return(dev, skb);
2782 return;
2785 netif_dbg(dev, rx_err, dev->net, "drop\n");
2786 dev->net->stats.rx_errors++;
2787 done:
2788 skb_queue_tail(&dev->done, skb);
2791 static void rx_complete(struct urb *urb);
/* Allocate an RX skb, attach it to the given URB and submit it on the
 * bulk-in pipe.  On any failure both the skb and the URB are freed.
 * Returns 0 on success, -ENOMEM/-ENOLINK or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff *skb;
	struct skb_data *entry;
	unsigned long lockflags;
	size_t size = dev->rx_urb_size;
	int ret = 0;

	skb = netdev_alloc_skb_ip_align(dev->net, size);
	if (!skb) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is up, awake and not halted */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			tasklet_schedule(&dev->bh);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);
	if (ret) {
		dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	}
	return ret;
}
/* Bulk-in URB completion (interrupt context).
 * Classifies the completion status, queues the skb for the bh tasklet via
 * defer_bh(), and resubmits the URB when the device is still healthy.
 * When the URB must not be resubmitted it is parked in entry->urb (and
 * the local 'urb' is NULLed) so the cleanup path frees it.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	skb_put(skb, urb->actual_length);
	state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			/* runt buffer: count it and recycle */
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		/* FALLTHROUGH */
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);

	if (urb) {
		if (netif_running(dev->net) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags) &&
		    state != unlink_start) {
			rx_submit(dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb(urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
/* TX bottom half: coalesce pending frames from txq_pend into one bulk-out
 * URB.  A GSO skb is sent alone (gso_skb path); otherwise frames are
 * copied back-to-back, each 4-byte aligned, up to MAX_SINGLE_PACKET_SIZE.
 * While the device is autosuspended the URB is anchored on dev->deferred
 * and submitted at resume time instead.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int length;
	struct urb *urb = NULL;
	struct skb_data *entry;
	unsigned long flags;
	struct sk_buff_head *tqp = &dev->txq_pend;
	struct sk_buff *skb, *skb2;
	int ret;
	int count, pos;
	int skb_totallen, pkt_cnt;

	/* pass 1: decide how many pending frames fit in one transfer */
	skb_totallen = 0;
	pkt_cnt = 0;
	for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
		if (skb_is_gso(skb)) {
			if (pkt_cnt) {
				/* handle previous packets first */
				break;
			}
			length = skb->len;
			skb2 = skb_dequeue(tqp);
			goto gso_skb;
		}

		if ((skb_totallen + skb->len) > MAX_SINGLE_PACKET_SIZE)
			break;
		/* each frame starts on a 4-byte boundary */
		skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
		pkt_cnt++;
	}

	/* copy to a single skb */
	skb = alloc_skb(skb_totallen, GFP_ATOMIC);
	if (!skb)
		goto drop;

	skb_put(skb, skb_totallen);

	for (count = pos = 0; count < pkt_cnt; count++) {
		skb2 = skb_dequeue(tqp);
		if (skb2) {
			memcpy(skb->data + pos, skb2->data, skb2->len);
			pos += roundup(skb2->len, sizeof(u32));
			dev_kfree_skb(skb2);
		}
	}

	length = skb_totallen;

gso_skb:
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *)skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = length;

	spin_lock_irqsave(&dev->txq.lock, flags);
	ret = usb_autopm_get_interface_async(dev->intf);
	if (ret < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_out,
			  skb->data, skb->len, tx_complete, skb);

	if (length % dev->maxpacket == 0) {
		/* send USB_ZERO_PACKET */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still a sleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(dev->net);
		usb_put_urb(urb);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		return;
	}
#endif

	ret = usb_submit_urb(urb, GFP_ATOMIC);
	switch (ret) {
	case 0:
		dev->net->trans_start = jiffies;
		lan78xx_queue_skb(&dev->txq, skb, tx_start);
		if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
			netif_stop_queue(dev->net);
		break;
	case -EPIPE:
		netif_stop_queue(dev->net);
		lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", ret);
		break;
	}

	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (ret) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", ret);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
}
/* RX bottom half: top up the in-flight RX URB pool (at most 10 per pass)
 * up to rx_qlen; reschedules itself via the tasklet if still short, and
 * wakes the TX queue when there is room again.
 */
static void lan78xx_rx_bh(struct lan78xx_net *dev)
{
	struct urb *urb;
	int i;

	if (skb_queue_len(&dev->rxq) < dev->rx_qlen) {
		for (i = 0; i < 10; i++) {
			if (skb_queue_len(&dev->rxq) >= dev->rx_qlen)
				break;
			urb = usb_alloc_urb(0, GFP_ATOMIC);
			if (urb)
				/* -ENOLINK: device stopped, give up now */
				if (rx_submit(dev, urb, GFP_ATOMIC) == -ENOLINK)
					return;
		}

		if (skb_queue_len(&dev->rxq) < dev->rx_qlen)
			tasklet_schedule(&dev->bh);
	}
	if (skb_queue_len(&dev->txq) < dev->tx_qlen)
		netif_wake_queue(dev->net);
}
/* Main tasklet: drain the done queue (dispatching completed RX buffers
 * and freeing finished TX/cleanup skbs), then kick TX batching and RX
 * URB replenishment while the device is up.
 */
static void lan78xx_bh(unsigned long param)
{
	struct lan78xx_net *dev = (struct lan78xx_net *)param;
	struct sk_buff *skb;
	struct skb_data *entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *)(skb->cb);
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			/* unexpected state: stop processing entirely */
			netdev_dbg(dev->net, "skb state %d\n", entry->state);
			return;
		}
	}

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		if (!skb_queue_empty(&dev->txq_pend))
			lan78xx_tx_bh(dev);

		if (!timer_pending(&dev->delay) &&
		    !test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_bh(dev);
	}
}
/* Deferred-event worker: handles the events raised via
 * lan78xx_defer_kevent() — clearing TX/RX endpoint halts, link resets
 * and periodic statistics updates.
 * NOTE: the fail_pipe/fail_halt/skip_reset labels jump INTO if-bodies
 * (inherited usbnet idiom) so the same netdev_err/netdev_info call
 * reports both the autopm failure and the clear-halt failure.
 */
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt(dev->udev, dev->pipe_out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
fail_pipe:
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
fail_halt:
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if (lan78xx_link_reset(dev) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			/* NOTE(review): 'ret' is always 0 here, so the
			 * logged value never reflects the real error —
			 * inherited quirk, confirm before relying on it.
			 */
			netdev_info(dev->net, "link reset failed (%d)\n",
				    ret);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		/* back off the stats poll interval exponentially, cap 50x */
		dev->delta = min((dev->delta * 2), 50);
	}
}
/* Interrupt-in URB completion: feeds link-status events to
 * lan78xx_status() and resubmits itself while the interface is running.
 */
static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:			/* urb killed */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_running(dev->net))
		return;

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status != 0)
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
}
/* USB disconnect callback: unregister the netdev, stop all deferred work
 * and in-flight URBs, then release the private data and device refs.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);

	net = dev->net;
	unregister_netdev(net);

	cancel_delayed_work_sync(&dev->wq);

	/* drop any TX URBs parked while autosuspended */
	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);	/* balances usb_get_dev() in probe */
}
/* ndo_tx_timeout: the watchdog fired — cancel in-flight TX URBs and let
 * the tasklet restart transmission.
 */
void lan78xx_tx_timeout(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);
}
/* net_device operations exported to the networking core */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= lan78xx_ioctl,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
};
3279 static void lan78xx_stat_monitor(unsigned long param)
3281 struct lan78xx_net *dev;
3283 dev = (struct lan78xx_net *)param;
3285 lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
3288 static int lan78xx_probe(struct usb_interface *intf,
3289 const struct usb_device_id *id)
3291 struct lan78xx_net *dev;
3292 struct net_device *netdev;
3293 struct usb_device *udev;
3294 int ret;
3295 unsigned maxp;
3296 unsigned period;
3297 u8 *buf = NULL;
3299 udev = interface_to_usbdev(intf);
3300 udev = usb_get_dev(udev);
3302 ret = -ENOMEM;
3303 netdev = alloc_etherdev(sizeof(struct lan78xx_net));
3304 if (!netdev) {
3305 dev_err(&intf->dev, "Error: OOM\n");
3306 goto out1;
3309 /* netdev_printk() needs this */
3310 SET_NETDEV_DEV(netdev, &intf->dev);
3312 dev = netdev_priv(netdev);
3313 dev->udev = udev;
3314 dev->intf = intf;
3315 dev->net = netdev;
3316 dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
3317 | NETIF_MSG_PROBE | NETIF_MSG_LINK);
3319 skb_queue_head_init(&dev->rxq);
3320 skb_queue_head_init(&dev->txq);
3321 skb_queue_head_init(&dev->done);
3322 skb_queue_head_init(&dev->rxq_pause);
3323 skb_queue_head_init(&dev->txq_pend);
3324 mutex_init(&dev->phy_mutex);
3326 tasklet_init(&dev->bh, lan78xx_bh, (unsigned long)dev);
3327 INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
3328 init_usb_anchor(&dev->deferred);
3330 netdev->netdev_ops = &lan78xx_netdev_ops;
3331 netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
3332 netdev->ethtool_ops = &lan78xx_ethtool_ops;
3334 dev->stat_monitor.function = lan78xx_stat_monitor;
3335 dev->stat_monitor.data = (unsigned long)dev;
3336 dev->delta = 1;
3337 init_timer(&dev->stat_monitor);
3339 mutex_init(&dev->stats.access_lock);
3341 ret = lan78xx_bind(dev, intf);
3342 if (ret < 0)
3343 goto out2;
3344 strcpy(netdev->name, "eth%d");
3346 if (netdev->mtu > (dev->hard_mtu - netdev->hard_header_len))
3347 netdev->mtu = dev->hard_mtu - netdev->hard_header_len;
3349 dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
3350 dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
3351 dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
3353 dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
3354 dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
3356 dev->pipe_intr = usb_rcvintpipe(dev->udev,
3357 dev->ep_intr->desc.bEndpointAddress &
3358 USB_ENDPOINT_NUMBER_MASK);
3359 period = dev->ep_intr->desc.bInterval;
3361 maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
3362 buf = kmalloc(maxp, GFP_KERNEL);
3363 if (buf) {
3364 dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
3365 if (!dev->urb_intr) {
3366 kfree(buf);
3367 goto out3;
3368 } else {
3369 usb_fill_int_urb(dev->urb_intr, dev->udev,
3370 dev->pipe_intr, buf, maxp,
3371 intr_complete, dev, period);
3375 dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
3377 /* driver requires remote-wakeup capability during autosuspend. */
3378 intf->needs_remote_wakeup = 1;
3380 ret = register_netdev(netdev);
3381 if (ret != 0) {
3382 netif_err(dev, probe, netdev, "couldn't register the device\n");
3383 goto out2;
3386 usb_set_intfdata(intf, dev);
3388 ret = device_set_wakeup_enable(&udev->dev, true);
3390 /* Default delay of 2sec has more overhead than advantage.
3391 * Set to 10sec as default.
3393 pm_runtime_set_autosuspend_delay(&udev->dev,
3394 DEFAULT_AUTOSUSPEND_DELAY);
3396 return 0;
3398 out3:
3399 lan78xx_unbind(dev, intf);
3400 out2:
3401 free_netdev(netdev);
3402 out1:
3403 usb_put_dev(udev);
3405 return ret;
3408 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
3410 const u16 crc16poly = 0x8005;
3411 int i;
3412 u16 bit, crc, msb;
3413 u8 data;
3415 crc = 0xFFFF;
3416 for (i = 0; i < len; i++) {
3417 data = *buf++;
3418 for (bit = 0; bit < 8; bit++) {
3419 msb = crc >> 15;
3420 crc <<= 1;
3422 if (msb ^ (u16)(data & 1)) {
3423 crc ^= crc16poly;
3424 crc |= (u16)0x0001U;
3426 data >>= 1;
3430 return crc;
/* Program the wake-on-LAN machinery for a full (system) suspend.
 * Disables TX/RX, clears stale wake status, then builds WUCSR and
 * PMT_CTL values from the requested 'wol' bits, installing wakeup-frame
 * filters (CRC16 over selected header bytes) for multicast and ARP
 * wakes.  Finally re-enables RX so wake frames can be seen.
 * Register write order is the hardware contract — do not reorder.
 * NOTE(review): the 'ret' results of the register accessors are
 * intentionally(?) unchecked throughout, as in the rest of this file.
 */
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	u32 buf;
	int ret;
	int mask_index;
	u16 crc;
	u32 temp_wucsr;
	u32 temp_pmt_ctl;
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };

	/* quiesce the MAC before reprogramming wake logic */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf &= ~MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf &= ~MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	temp_wucsr = 0;

	temp_pmt_ctl = 0;
	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	/* clear every wakeup-frame filter slot before installing new ones */
	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++)
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	buf |= PMT_CTL_WUPS_MASK_;
	ret = lan78xx_write_reg(dev, PMT_CTL, buf);

	/* re-enable RX so wake frames are actually received */
	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
	buf |= MAC_RX_RXEN_;
	ret = lan78xx_write_reg(dev, MAC_RX, buf);

	return 0;
}
/* USB suspend callback (both autosuspend and system suspend).
 * On the first suspend of a nesting sequence: refuse autosuspend while
 * TX is busy, otherwise mark the device asleep, stop the MAC and drain
 * all URBs.  Then program wake sources: good-frame wake for autosuspend,
 * or the configured WoL set (lan78xx_set_suspend) for system suspend.
 * NOTE(review): 'event' is assigned but never used, and the register
 * accessor 'ret' values are unchecked — inherited quirks of this file.
 */
int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	u32 buf;
	int ret;
	int event;

	event = message.event;

	if (!dev->suspend_count++) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop TX & RX */
		ret = lan78xx_read_reg(dev, MAC_TX, &buf);
		buf &= ~MAC_TX_TXEN_;
		ret = lan78xx_write_reg(dev, MAC_TX, buf);
		ret = lan78xx_read_reg(dev, MAC_RX, &buf);
		buf &= ~MAC_RX_RXEN_;
		ret = lan78xx_write_reg(dev, MAC_RX, buf);

		/* empty out the rx and queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);
	}

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		del_timer(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			/* auto suspend (selective suspend) */
			ret = lan78xx_read_reg(dev, MAC_TX, &buf);
			buf &= ~MAC_TX_TXEN_;
			ret = lan78xx_write_reg(dev, MAC_TX, buf);
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf &= ~MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);

			ret = lan78xx_write_reg(dev, WUCSR, 0);
			ret = lan78xx_write_reg(dev, WUCSR2, 0);
			ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

			/* set goodframe wakeup */
			ret = lan78xx_read_reg(dev, WUCSR, &buf);

			buf |= WUCSR_RFE_WAKE_EN_;
			buf |= WUCSR_STORE_WAKE_;

			ret = lan78xx_write_reg(dev, WUCSR, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
			buf |= PMT_CTL_RES_CLR_WKP_STS_;

			buf |= PMT_CTL_PHY_WAKE_EN_;
			buf |= PMT_CTL_WOL_EN_;
			buf &= ~PMT_CTL_SUS_MODE_MASK_;
			buf |= PMT_CTL_SUS_MODE_3_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			ret = lan78xx_read_reg(dev, PMT_CTL, &buf);

			buf |= PMT_CTL_WUPS_MASK_;

			ret = lan78xx_write_reg(dev, PMT_CTL, buf);

			/* keep RX on so good frames can wake us */
			ret = lan78xx_read_reg(dev, MAC_RX, &buf);
			buf |= MAC_RX_RXEN_;
			ret = lan78xx_write_reg(dev, MAC_RX, buf);
		} else {
			lan78xx_set_suspend(dev, pdata->wol);
		}
	}

	ret = 0;
out:
	return ret;
}
/* USB resume callback.
 *
 * Restarts the statistics timer, and on the final resume
 * (suspend_count 1 -> 0) resubmits the interrupt URB, flushes TX URBs that
 * were deferred onto dev->deferred while asleep, clears the ASLEEP flag and
 * restarts the TX queue.  Finally clears all wakeup status/config registers
 * and re-enables the MAC transmitter.
 *
 * Always returns 0; individual register write failures are ignored here,
 * consistent with the rest of this driver.
 */
int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	struct sk_buff *skb;
	struct urb *res;
	int ret;
	u32 buf;

	/* restart periodic statistics gathering if it was stopped in suspend */
	if (!timer_pending(&dev->stat_monitor)) {
		dev->delta = 1;
		mod_timer(&dev->stat_monitor,
			  jiffies + STAT_UPDATE_TIMER);
	}

	if (!--dev->suspend_count) {
		/* resume interrupt URBs */
		if (dev->urb_intr && test_bit(EVENT_DEV_OPEN, &dev->flags))
			usb_submit_urb(dev->urb_intr, GFP_NOIO);

		/* txq.lock also guards the deferred anchor against the TX path */
		spin_lock_irq(&dev->txq.lock);
		while ((res = usb_get_from_anchor(&dev->deferred))) {
			/* urb->context was set to the skb when it was deferred */
			skb = (struct sk_buff *)res->context;
			ret = usb_submit_urb(res, GFP_ATOMIC);
			if (ret < 0) {
				/* drop the packet and release the PM reference
				 * taken when the URB was deferred
				 */
				dev_kfree_skb_any(skb);
				usb_free_urb(res);
				usb_autopm_put_interface_async(dev->intf);
			} else {
				dev->net->trans_start = jiffies;
				lan78xx_queue_skb(&dev->txq, skb, tx_start);
			}
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
		spin_unlock_irq(&dev->txq.lock);

		if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
			/* only wake the queue if there is room for more URBs */
			if (!(skb_queue_len(&dev->txq) >= dev->tx_qlen))
				netif_start_queue(dev->net);
			tasklet_schedule(&dev->bh);
		}
	}

	/* disarm wakeup and clear latched wake sources
	 * (0xFFF1FF1F is presumably the write-1-to-clear mask for WK_SRC —
	 * confirm against the LAN78xx datasheet)
	 */
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);

	/* write-1-to-clear the per-event "received" status bits */
	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);

	/* re-enable the transmitter (disabled on suspend) */
	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
	buf |= MAC_TX_TXEN_;
	ret = lan78xx_write_reg(dev, MAC_TX, buf);

	return 0;
}
/* USB reset-resume callback: the device lost its state across a reset, so
 * re-run the full hardware and PHY bring-up before the normal resume path.
 */
int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *priv = usb_get_intfdata(intf);

	/* reprogram MAC/FIFO registers, then reinitialize the PHY */
	lan78xx_reset(priv);
	lan78xx_phy_init(priv);

	return lan78xx_resume(intf);
}
/* USB IDs this driver binds to; exported so hotplug/modprobe can match. */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{},	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, products);
/* USB driver registration: PM callbacks above, autosuspend enabled.
 * disable_hub_initiated_lpm leaves link power management to the device.
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");