/* Framework for configuring and reading PHY devices
 * Based on code in sungem_phy.c and gianfar_phy.c
 *
 * Author: Andy Fleming
 *
 * Copyright (c) 2004 Freescale Semiconductor, Inc.
 * Copyright (c) 2006, 2007  Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <asm/irq.h>

static const char *phy_speed_to_str(int speed)
{
	switch (speed) {
	case SPEED_10:
		return "10Mbps";
	case SPEED_100:
		return "100Mbps";
	case SPEED_1000:
		return "1Gbps";
	case SPEED_2500:
		return "2.5Gbps";
	case SPEED_10000:
		return "10Gbps";
	case SPEED_UNKNOWN:
		return "Unknown";
	default:
		return "Unsupported (update phy.c)";
	}
}

/**
 * phy_print_status - Convenience function to print out the current phy status
 * @phydev: the phy_device struct
 */
void phy_print_status(struct phy_device *phydev)
{
	if (phydev->link) {
		netdev_info(phydev->attached_dev,
			    "Link is Up - %s/%s - flow control %s\n",
			    phy_speed_to_str(phydev->speed),
			    DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
			    phydev->pause ? "rx/tx" : "off");
	} else	{
		netdev_info(phydev->attached_dev, "Link is Down\n");
	}
}
EXPORT_SYMBOL(phy_print_status);

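/* Usage sketch (illustrative; the driver, priv and helper names below are
 * assumptions, not part of this file): a MAC driver's adjust_link callback
 * typically reprograms the MAC for the new speed/duplex and then logs the
 * result with phy_print_status().
 *
 *	static void my_mac_adjust_link(struct net_device *dev)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *		struct phy_device *phydev = priv->phydev;
 *
 *		if (phydev->link)
 *			my_mac_set_speed(priv, phydev->speed, phydev->duplex);
 *
 *		phy_print_status(phydev);
 *	}
 */
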
/**
 * phy_clear_interrupt - Ack the phy device's interrupt
 * @phydev: the phy_device struct
 *
 * If the @phydev driver has an ack_interrupt function, call it to
 * ack and clear the phy device's interrupt.
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_clear_interrupt(struct phy_device *phydev)
{
	if (phydev->drv->ack_interrupt)
		return phydev->drv->ack_interrupt(phydev);

	return 0;
}

/**
 * phy_config_interrupt - configure the PHY device for the requested interrupts
 * @phydev: the phy_device struct
 * @interrupts: interrupt flags to configure for this @phydev
 *
 * Returns 0 on success or < 0 on error.
 */
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
	phydev->interrupts = interrupts;
	if (phydev->drv->config_intr)
		return phydev->drv->config_intr(phydev);

	return 0;
}

/**
 * phy_aneg_done - return auto-negotiation status
 * @phydev: target phy_device struct
 *
 * Description: Return the auto-negotiation status from this @phydev
 * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
 * is still pending.
 */
static inline int phy_aneg_done(struct phy_device *phydev)
{
	if (phydev->drv->aneg_done)
		return phydev->drv->aneg_done(phydev);

	return genphy_aneg_done(phydev);
}

/* A structure for mapping a particular speed and duplex
 * combination to a particular SUPPORTED and ADVERTISED value
 */
struct phy_setting {
	int speed;
	int duplex;
	u32 setting;
};

/* A mapping of all SUPPORTED settings to speed/duplex */
static const struct phy_setting settings[] = {
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKR_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseKX4_Full,
	},
	{
		.speed = SPEED_10000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10000baseT_Full,
	},
	{
		.speed = SPEED_2500,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_2500baseX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseKX_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_1000baseT_Full,
	},
	{
		.speed = SPEED_1000,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_1000baseT_Half,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_100baseT_Full,
	},
	{
		.speed = SPEED_100,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_100baseT_Half,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_FULL,
		.setting = SUPPORTED_10baseT_Full,
	},
	{
		.speed = SPEED_10,
		.duplex = DUPLEX_HALF,
		.setting = SUPPORTED_10baseT_Half,
	},
};

#define MAX_NUM_SETTINGS ARRAY_SIZE(settings)

/**
 * phy_find_setting - find a PHY settings array entry that matches speed & duplex
 * @speed: speed to match
 * @duplex: duplex to match
 *
 * Description: Searches the settings array for the setting which
 *   matches the desired speed and duplex, and returns the index
 *   of that setting.  Returns the index of the last setting if
 *   none of the others match.
 */
static inline unsigned int phy_find_setting(int speed, int duplex)
{
	unsigned int idx = 0;

	while (idx < ARRAY_SIZE(settings) &&
	       (settings[idx].speed != speed || settings[idx].duplex != duplex))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_find_valid - find a PHY setting that matches the requested features mask
 * @idx: The first index in settings[] to search
 * @features: A mask of the valid settings
 *
 * Description: Returns the index of the first valid setting less
 *   than or equal to the one pointed to by idx, as determined by
 *   the mask in features.  Returns the index of the last setting
 *   if nothing else matches.
 */
static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
		idx++;

	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
}

/**
 * phy_check_valid - check if there is a valid PHY setting which matches
 *		     speed, duplex, and feature mask
 * @speed: speed to match
 * @duplex: duplex to match
 * @features: A mask of the valid settings
 *
 * Description: Returns true if there is a valid setting, false otherwise.
 */
static inline bool phy_check_valid(int speed, int duplex, u32 features)
{
	unsigned int idx;

	idx = phy_find_valid(phy_find_setting(speed, duplex), features);

	return settings[idx].speed == speed && settings[idx].duplex == duplex &&
		(settings[idx].setting & features);
}

/**
 * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
 * @phydev: the target phy_device struct
 *
 * Description: Make sure the PHY is set to supported speeds and
 *   duplexes.  Drop down by one in this order:  1000/FULL,
 *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
 */
static void phy_sanitize_settings(struct phy_device *phydev)
{
	u32 features = phydev->supported;
	unsigned int idx;

	/* Sanitize settings based on PHY capabilities */
	if ((features & SUPPORTED_Autoneg) == 0)
		phydev->autoneg = AUTONEG_DISABLE;

	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
			     features);

	phydev->speed = settings[idx].speed;
	phydev->duplex = settings[idx].duplex;
}

/**
 * phy_ethtool_sset - generic ethtool sset function, handles all the details
 * @phydev: target phy_device struct
 * @cmd: ethtool_cmd
 *
 * A few notes about parameter checking:
 * - We don't set port or transceiver, so we don't care what they
 *   were set to.
 * - phy_start_aneg() will make sure forced settings are sane, and
 *   choose the next best ones from the ones selected, so we don't
 *   care if ethtool tries to give us bad values.
 */
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->phy_address != phydev->addr)
		return -EINVAL;

	/* We make sure that we don't pass unsupported values in to the PHY */
	cmd->advertising &= phydev->supported;

	/* Verify the settings we care about. */
	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((speed != SPEED_1000 &&
	      speed != SPEED_100 &&
	      speed != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	phydev->autoneg = cmd->autoneg;

	phydev->speed = speed;

	phydev->advertising = cmd->advertising;

	if (AUTONEG_ENABLE == cmd->autoneg)
		phydev->advertising |= ADVERTISED_Autoneg;
	else
		phydev->advertising &= ~ADVERTISED_Autoneg;

	phydev->duplex = cmd->duplex;

	/* Restart the PHY */
	phy_start_aneg(phydev);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_sset);

int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
	cmd->supported = phydev->supported;

	cmd->advertising = phydev->advertising;
	cmd->lp_advertising = phydev->lp_advertising;

	ethtool_cmd_speed_set(cmd, phydev->speed);
	cmd->duplex = phydev->duplex;
	if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
		cmd->port = PORT_BNC;
	else
		cmd->port = PORT_MII;
	cmd->phy_address = phydev->addr;
	cmd->transceiver = phy_is_internal(phydev) ?
		XCVR_INTERNAL : XCVR_EXTERNAL;
	cmd->autoneg = phydev->autoneg;

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_gset);

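/* Usage sketch (illustrative; the driver and priv names are assumptions):
 * MAC drivers usually forward ethtool's legacy get/set_settings hooks
 * straight to these two helpers.
 *
 *	static int my_mac_get_settings(struct net_device *dev,
 *				       struct ethtool_cmd *cmd)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_gset(priv->phydev, cmd);
 *	}
 *
 *	static int my_mac_set_settings(struct net_device *dev,
 *				       struct ethtool_cmd *cmd)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		if (!priv->phydev)
 *			return -ENODEV;
 *		return phy_ethtool_sset(priv->phydev, cmd);
 *	}
 *
 *	static const struct ethtool_ops my_mac_ethtool_ops = {
 *		.get_settings	= my_mac_get_settings,
 *		.set_settings	= my_mac_set_settings,
 *	};
 */
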
/**
 * phy_mii_ioctl - generic PHY MII ioctl interface
 * @phydev: the phy_device struct
 * @ifr: &struct ifreq for socket ioctl's
 * @cmd: ioctl cmd to execute
 *
 * Note that this function is currently incompatible with the
 * PHYCONTROL layer.  It changes registers without regard to
 * current state.  Use at own risk.
 */
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *mii_data = if_mii(ifr);
	u16 val = mii_data->val_in;
	bool change_autoneg = false;

	switch (cmd) {
	case SIOCGMIIPHY:
		mii_data->phy_id = phydev->addr;
		/* fall through */

	case SIOCGMIIREG:
		mii_data->val_out = mdiobus_read(phydev->bus, mii_data->phy_id,
						 mii_data->reg_num);
		return 0;

	case SIOCSMIIREG:
		if (mii_data->phy_id == phydev->addr) {
			switch (mii_data->reg_num) {
			case MII_BMCR:
				if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
					if (phydev->autoneg == AUTONEG_ENABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_DISABLE;
					if (val & BMCR_FULLDPLX)
						phydev->duplex = DUPLEX_FULL;
					else
						phydev->duplex = DUPLEX_HALF;
					if (val & BMCR_SPEED1000)
						phydev->speed = SPEED_1000;
					else if (val & BMCR_SPEED100)
						phydev->speed = SPEED_100;
					else
						phydev->speed = SPEED_10;
				} else {
					if (phydev->autoneg == AUTONEG_DISABLE)
						change_autoneg = true;
					phydev->autoneg = AUTONEG_ENABLE;
				}
				break;
			case MII_ADVERTISE:
				phydev->advertising = mii_adv_to_ethtool_adv_t(val);
				change_autoneg = true;
				break;
			default:
				/* do nothing */
				break;
			}
		}

		mdiobus_write(phydev->bus, mii_data->phy_id,
			      mii_data->reg_num, val);

		if (mii_data->reg_num == MII_BMCR &&
		    val & BMCR_RESET)
			return phy_init_hw(phydev);

		if (change_autoneg)
			return phy_start_aneg(phydev);

		return 0;

	case SIOCSHWTSTAMP:
		if (phydev->drv->hwtstamp)
			return phydev->drv->hwtstamp(phydev, ifr);
		/* fall through */

	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL(phy_mii_ioctl);

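/* Usage sketch (illustrative; the driver and priv names are assumptions):
 * a MAC driver generally exposes this through its ndo_do_ioctl hook so
 * that mii-tool style SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG requests reach
 * the attached PHY.
 *
 *	static int my_mac_ioctl(struct net_device *dev, struct ifreq *ifr,
 *				int cmd)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		if (!netif_running(dev) || !priv->phydev)
 *			return -EINVAL;
 *		return phy_mii_ioctl(priv->phydev, ifr, cmd);
 *	}
 */
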
/**
 * phy_start_aneg - start auto-negotiation for this PHY device
 * @phydev: the phy_device struct
 *
 * Description: Sanitizes the settings (if we're not autonegotiating
 *   them), and then calls the driver's config_aneg function.
 *   If the PHYCONTROL Layer is operating, we change the state to
 *   reflect the beginning of Auto-negotiation or forcing.
 */
int phy_start_aneg(struct phy_device *phydev)
{
	int err;

	mutex_lock(&phydev->lock);

	if (AUTONEG_DISABLE == phydev->autoneg)
		phy_sanitize_settings(phydev);

	/* Invalidate LP advertising flags */
	phydev->lp_advertising = 0;

	err = phydev->drv->config_aneg(phydev);
	if (err < 0)
		goto out_unlock;

	if (phydev->state != PHY_HALTED) {
		if (AUTONEG_ENABLE == phydev->autoneg) {
			phydev->state = PHY_AN;
			phydev->link_timeout = PHY_AN_TIMEOUT;
		} else {
			phydev->state = PHY_FORCING;
			phydev->link_timeout = PHY_FORCE_TIMEOUT;
		}
	}

out_unlock:
	mutex_unlock(&phydev->lock);
	return err;
}
EXPORT_SYMBOL(phy_start_aneg);

/**
 * phy_start_machine - start PHY state machine tracking
 * @phydev: the phy_device struct
 *
 * Description: The PHY infrastructure can run a state machine
 *   which tracks whether the PHY is starting up, negotiating,
 *   etc.  This function starts the timer which tracks the state
 *   of the PHY.  If you want to maintain your own state machine,
 *   do not call this function.
 */
void phy_start_machine(struct phy_device *phydev)
{
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
}

/**
 * phy_stop_machine - stop the PHY state machine tracking
 * @phydev: target phy_device struct
 *
 * Description: Stops the state machine timer, sets the state to UP
 *   (unless it wasn't up yet). This function must be called BEFORE
 *   phy_detach.
 */
void phy_stop_machine(struct phy_device *phydev)
{
	cancel_delayed_work_sync(&phydev->state_queue);

	mutex_lock(&phydev->lock);
	if (phydev->state > PHY_UP)
		phydev->state = PHY_UP;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_error - enter HALTED state for this PHY device
 * @phydev: target phy_device struct
 *
 * Moves the PHY to the HALTED state in response to a read
 * or write error, and tells the controller the link is down.
 * Must not be called from interrupt context, or while the
 * phydev->lock is held.
 */
static void phy_error(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);
	phydev->state = PHY_HALTED;
	mutex_unlock(&phydev->lock);
}

/**
 * phy_interrupt - PHY interrupt handler
 * @irq: interrupt line
 * @phy_dat: phy_device pointer
 *
 * Description: When a PHY interrupt occurs, the handler disables
 * interrupts, and schedules a work task to clear the interrupt.
 */
static irqreturn_t phy_interrupt(int irq, void *phy_dat)
{
	struct phy_device *phydev = phy_dat;

	if (PHY_HALTED == phydev->state)
		return IRQ_NONE;		/* It can't be ours.  */

	/* The MDIO bus is not allowed to be written in interrupt
	 * context, so we need to disable the irq here.  A work
	 * queue will write the PHY to disable and clear the
	 * interrupt, and then reenable the irq line.
	 */
	disable_irq_nosync(irq);
	atomic_inc(&phydev->irq_disable);

	queue_work(system_power_efficient_wq, &phydev->phy_queue);

	return IRQ_HANDLED;
}

/**
 * phy_enable_interrupts - Enable the interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_enable_interrupts(struct phy_device *phydev)
{
	int err = phy_clear_interrupt(phydev);

	if (err < 0)
		return err;

	return phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
}

/**
 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
 * @phydev: target phy_device struct
 */
static int phy_disable_interrupts(struct phy_device *phydev)
{
	int err;

	/* Disable PHY interrupts */
	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
	if (err)
		goto phy_err;

	/* Clear the interrupt */
	err = phy_clear_interrupt(phydev);
	if (err)
		goto phy_err;

	return 0;

phy_err:
	phy_error(phydev);

	return err;
}

/**
 * phy_start_interrupts - request and enable interrupts for a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Request the interrupt for the given PHY.
 *   If this fails, then we set irq to PHY_POLL.
 *   Otherwise, we enable the interrupts in the PHY.
 *   This should only be called with a valid IRQ number.
 *   Returns 0 on success or < 0 on error.
 */
int phy_start_interrupts(struct phy_device *phydev)
{
	atomic_set(&phydev->irq_disable, 0);
	if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
			phydev) < 0) {
		pr_warn("%s: Can't get IRQ %d (PHY)\n",
			phydev->bus->name, phydev->irq);
		phydev->irq = PHY_POLL;
		return 0;
	}

	return phy_enable_interrupts(phydev);
}
EXPORT_SYMBOL(phy_start_interrupts);

/**
 * phy_stop_interrupts - disable interrupts from a PHY device
 * @phydev: target phy_device struct
 */
int phy_stop_interrupts(struct phy_device *phydev)
{
	int err = phy_disable_interrupts(phydev);

	if (err)
		phy_error(phydev);

	free_irq(phydev->irq, phydev);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but we do not really care about what would
	 * be done, except from enable_irq(), so cancel any work
	 * possibly pending and take care of the matter below.
	 */
	cancel_work_sync(&phydev->phy_queue);
	/* If work indeed has been cancelled, disable_irq() will have
	 * been left unbalanced from phy_interrupt() and enable_irq()
	 * has to be called so that other devices on the line work.
	 */
	while (atomic_dec_return(&phydev->irq_disable) >= 0)
		enable_irq(phydev->irq);

	return err;
}
EXPORT_SYMBOL(phy_stop_interrupts);

/**
 * phy_change - Scheduled by the phy_interrupt/timer to handle PHY changes
 * @work: work_struct that describes the work to be done
 */
void phy_change(struct work_struct *work)
{
	struct phy_device *phydev =
		container_of(work, struct phy_device, phy_queue);

	if (phydev->drv->did_interrupt &&
	    !phydev->drv->did_interrupt(phydev))
		goto ignore;

	if (phy_disable_interrupts(phydev))
		goto phy_err;

	mutex_lock(&phydev->lock);
	if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
		phydev->state = PHY_CHANGELINK;
	mutex_unlock(&phydev->lock);

	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);

	/* Reenable interrupts */
	if (PHY_HALTED != phydev->state &&
	    phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
		goto irq_enable_err;

	/* reschedule state queue work to run as soon as possible */
	cancel_delayed_work_sync(&phydev->state_queue);
	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
	return;

ignore:
	atomic_dec(&phydev->irq_disable);
	enable_irq(phydev->irq);
	return;

irq_enable_err:
	disable_irq(phydev->irq);
	atomic_inc(&phydev->irq_disable);
phy_err:
	phy_error(phydev);
}

/**
 * phy_stop - Bring down the PHY link, and stop checking the status
 * @phydev: target phy_device struct
 */
void phy_stop(struct phy_device *phydev)
{
	mutex_lock(&phydev->lock);

	if (PHY_HALTED == phydev->state)
		goto out_unlock;

	if (phy_interrupt_is_valid(phydev)) {
		/* Disable PHY Interrupts */
		phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);

		/* Clear any pending interrupts */
		phy_clear_interrupt(phydev);
	}

	phydev->state = PHY_HALTED;

out_unlock:
	mutex_unlock(&phydev->lock);

	/* Cannot call flush_scheduled_work() here as desired because
	 * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change()
	 * will not reenable interrupts.
	 */
}
EXPORT_SYMBOL(phy_stop);

/**
 * phy_start - start or restart a PHY device
 * @phydev: target phy_device struct
 *
 * Description: Indicates the attached device's readiness to
 *   handle PHY-related work.  Used during startup to start the
 *   PHY, and after a call to phy_stop() to resume operation.
 *   Also used to indicate the MDIO bus has cleared an error
 *   condition.
 */
void phy_start(struct phy_device *phydev)
{
	bool do_resume = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	switch (phydev->state) {
	case PHY_STARTING:
		phydev->state = PHY_PENDING;
		break;
	case PHY_READY:
		phydev->state = PHY_UP;
		break;
	case PHY_HALTED:
		/* make sure interrupts are re-enabled for the PHY */
		err = phy_enable_interrupts(phydev);
		if (err < 0)
			break;

		phydev->state = PHY_RESUMING;
		do_resume = true;
		break;
	default:
		break;
	}
	mutex_unlock(&phydev->lock);

	/* if phy was suspended, bring the physical link up again */
	if (do_resume)
		phy_resume(phydev);
}
EXPORT_SYMBOL(phy_start);

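/* Usage sketch (illustrative; the bus id, callback and priv names are
 * assumptions): a MAC driver normally binds the PHY with phy_connect()
 * at open time, starts it with phy_start(), and tears it down with
 * phy_stop()/phy_disconnect() on close.
 *
 *	static int my_mac_open(struct net_device *dev)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		priv->phydev = phy_connect(dev, "mdio-bus:00",
 *					   my_mac_adjust_link,
 *					   PHY_INTERFACE_MODE_RGMII);
 *		if (IS_ERR(priv->phydev))
 *			return PTR_ERR(priv->phydev);
 *
 *		phy_start(priv->phydev);
 *		return 0;
 *	}
 *
 *	static int my_mac_stop(struct net_device *dev)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		phy_stop(priv->phydev);
 *		phy_disconnect(priv->phydev);
 *		return 0;
 *	}
 */
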
/**
 * phy_state_machine - Handle the state machine
 * @work: work_struct that describes the work to be done
 */
void phy_state_machine(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct phy_device *phydev =
			container_of(dwork, struct phy_device, state_queue);
	bool needs_aneg = false, do_suspend = false;
	int err = 0;

	mutex_lock(&phydev->lock);

	if (phydev->drv->link_change_notify)
		phydev->drv->link_change_notify(phydev);

	switch (phydev->state) {
	case PHY_DOWN:
	case PHY_STARTING:
	case PHY_READY:
	case PHY_PENDING:
		break;
	case PHY_UP:
		needs_aneg = true;

		phydev->link_timeout = PHY_AN_TIMEOUT;

		break;
	case PHY_AN:
		err = phy_read_status(phydev);
		if (err < 0)
			break;

		/* If the link is down, give up on negotiation for now */
		if (!phydev->link) {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			break;
		}

		/* Check if negotiation is done.  Break if there's an error */
		err = phy_aneg_done(phydev);
		if (err < 0)
			break;

		/* If AN is done, we're running */
		if (err > 0) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);

		} else if (0 == phydev->link_timeout--)
			needs_aneg = true;
		break;
	case PHY_NOLINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			if (AUTONEG_ENABLE == phydev->autoneg) {
				err = phy_aneg_done(phydev);
				if (err < 0)
					break;

				if (!err) {
					phydev->state = PHY_AN;
					phydev->link_timeout = PHY_AN_TIMEOUT;
					break;
				}
			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	case PHY_FORCING:
		err = genphy_update_link(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			if (0 == phydev->link_timeout--)
				needs_aneg = true;
		}

		phydev->adjust_link(phydev->attached_dev);
		break;
	case PHY_RUNNING:
		/* Only register a CHANGE if we are
		 * polling or ignoring interrupts
		 */
		if (!phy_interrupt_is_valid(phydev))
			phydev->state = PHY_CHANGELINK;
		break;
	case PHY_CHANGELINK:
		err = phy_read_status(phydev);
		if (err)
			break;

		if (phydev->link) {
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
		} else {
			phydev->state = PHY_NOLINK;
			netif_carrier_off(phydev->attached_dev);
		}

		phydev->adjust_link(phydev->attached_dev);

		if (phy_interrupt_is_valid(phydev))
			err = phy_config_interrupt(phydev,
						   PHY_INTERRUPT_ENABLED);
		break;
	case PHY_HALTED:
		if (phydev->link) {
			phydev->link = 0;
			netif_carrier_off(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
			do_suspend = true;
		}
		break;
	case PHY_RESUMING:
		if (AUTONEG_ENABLE == phydev->autoneg) {
			err = phy_aneg_done(phydev);
			if (err < 0)
				break;

			/* err > 0 if AN is done.
			 * Otherwise, it's 0, and we're still waiting for AN
			 */
			if (err > 0) {
				err = phy_read_status(phydev);
				if (err)
					break;

				if (phydev->link) {
					phydev->state = PHY_RUNNING;
					netif_carrier_on(phydev->attached_dev);
				} else	{
					phydev->state = PHY_NOLINK;
				}
				phydev->adjust_link(phydev->attached_dev);
			} else {
				phydev->state = PHY_AN;
				phydev->link_timeout = PHY_AN_TIMEOUT;
			}
		} else {
			err = phy_read_status(phydev);
			if (err)
				break;

			if (phydev->link) {
				phydev->state = PHY_RUNNING;
				netif_carrier_on(phydev->attached_dev);
			} else	{
				phydev->state = PHY_NOLINK;
			}
			phydev->adjust_link(phydev->attached_dev);
		}
		break;
	}

	mutex_unlock(&phydev->lock);

	if (needs_aneg)
		err = phy_start_aneg(phydev);
	else if (do_suspend)
		phy_suspend(phydev);

	if (err < 0)
		phy_error(phydev);

	queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
			   PHY_STATE_TIME * HZ);
}

void phy_mac_interrupt(struct phy_device *phydev, int new_link)
{
	cancel_work_sync(&phydev->phy_queue);
	phydev->link = new_link;
	schedule_work(&phydev->phy_queue);
}
EXPORT_SYMBOL(phy_mac_interrupt);

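/* Usage sketch (illustrative; the driver, priv and helper names are
 * assumptions): MACs that learn the link state in-band and attach their
 * PHY with irq = PHY_IGNORE_INTERRUPT can report changes here instead of
 * relying on polling.  Call it from process context (e.g. a work item or
 * threaded handler), since it synchronously cancels pending PHY work.
 *
 *	static void my_mac_link_work(struct work_struct *work)
 *	{
 *		struct my_mac_priv *priv =
 *			container_of(work, struct my_mac_priv, link_work);
 *
 *		// my_mac_link_is_up() is an assumed driver helper
 *		phy_mac_interrupt(priv->phydev, my_mac_link_is_up(priv));
 *	}
 */
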
static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
				    int addr)
{
	/* Write the desired MMD Devad */
	bus->write(bus, addr, MII_MMD_CTRL, devad);

	/* Write the desired MMD register address */
	bus->write(bus, addr, MII_MMD_DATA, prtad);

	/* Select the Function : DATA with no post increment */
	bus->write(bus, addr, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
}

/**
 * phy_read_mmd_indirect - reads data from the MMD registers
 * @phydev: The PHY device bus
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 *
 * Description: it reads data from the MMD registers (clause 22 access to
 * clause 45 registers) of the specified phy address.
 * To read these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read reg 14 // Read MMD data
 */
int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
			  int devad, int addr)
{
	struct phy_driver *phydrv = phydev->drv;
	int value = -1;

	if (phydrv->read_mmd_indirect == NULL) {
		mmd_phy_indirect(phydev->bus, prtad, devad, addr);

		/* Read the content of the MMD's selected register */
		value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
	} else {
		value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
	}
	return value;
}
EXPORT_SYMBOL(phy_read_mmd_indirect);

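/* Example (illustrative): reading the EEE ability register (register 3.20,
 * i.e. MDIO_PCS_EEE_ABLE in device MDIO_MMD_PCS) through the clause 22
 * indirect mechanism described above, as phy_init_eee() does below.
 *
 *	int eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
 *					    MDIO_MMD_PCS, phydev->addr);
 *	if (eee_cap < 0)
 *		return eee_cap;		// MDIO access failed
 *
 * Under the hood this performs: write 0x3 (PCS) to reg 13, write 20 to
 * reg 14, write 0x4003 (data, no post increment) to reg 13, then read
 * the value back from reg 14.
 */
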
/**
 * phy_write_mmd_indirect - writes data to the MMD registers
 * @phydev: The PHY device
 * @prtad: MMD Address
 * @devad: MMD DEVAD
 * @addr: PHY address on the MII bus
 * @data: data to write in the MMD register
 *
 * Description: Write data to the MMD registers of the specified
 * phy address.
 * To write these registers we have:
 * 1) Write reg 13 // DEVAD
 * 2) Write reg 14 // MMD Address
 * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
 */
void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
			    int devad, int addr, u32 data)
{
	struct phy_driver *phydrv = phydev->drv;

	if (phydrv->write_mmd_indirect == NULL) {
		mmd_phy_indirect(phydev->bus, prtad, devad, addr);

		/* Write the data into MMD's selected register */
		phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
	} else {
		phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
	}
}
EXPORT_SYMBOL(phy_write_mmd_indirect);

/**
 * phy_init_eee - init and check the EEE feature
 * @phydev: target phy_device struct
 * @clk_stop_enable: PHY may stop the clock during LPI
 *
 * Description: it checks if the Energy-Efficient Ethernet (EEE)
 * is supported by looking at the MMD registers 3.20 and 7.60/61
 * and it programs the MMD register 3.0 setting the "Clock stop enable"
 * bit if required.
 */
int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
	/* According to 802.3az, EEE is supported only in full duplex mode.
	 * Also the EEE feature is active when the core is operating with MII,
	 * GMII or RGMII (all kinds). Internal PHYs are also allowed to proceed
	 * and should return an error if they do not support EEE.
	 */
	if ((phydev->duplex == DUPLEX_FULL) &&
	    ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
	    (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
	    (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
	     phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
	     phy_is_internal(phydev))) {
		int eee_lp, eee_cap, eee_adv;
		u32 lp, cap, adv;
		int status;

		/* Read phy status to properly get the right settings */
		status = phy_read_status(phydev);
		if (status)
			return status;

		/* First check if the EEE ability is supported */
		eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
						MDIO_MMD_PCS, phydev->addr);
		if (eee_cap <= 0)
			goto eee_exit_err;

		cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
		if (!cap)
			goto eee_exit_err;

		/* Check which link settings negotiated and verify it in
		 * the EEE advertising registers.
		 */
		eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
					       MDIO_MMD_AN, phydev->addr);
		if (eee_lp <= 0)
			goto eee_exit_err;

		eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
						MDIO_MMD_AN, phydev->addr);
		if (eee_adv <= 0)
			goto eee_exit_err;

		adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
		lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
		if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
			goto eee_exit_err;

		if (clk_stop_enable) {
			/* Configure the PHY to stop receiving xMII
			 * clock while it is signaling LPI.
			 */
			int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
							MDIO_MMD_PCS,
							phydev->addr);
			if (val < 0)
				return val;

			val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
			phy_write_mmd_indirect(phydev, MDIO_CTRL1,
					       MDIO_MMD_PCS, phydev->addr,
					       val);
		}

		return 0; /* EEE supported */
	}
eee_exit_err:
	return -EPROTONOSUPPORT;
}
EXPORT_SYMBOL(phy_init_eee);

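/* Usage sketch (illustrative; the priv fields and helper are assumptions):
 * a MAC driver that supports LPI typically calls this from its adjust_link
 * callback once the link is up, and only enables its own EEE logic when
 * the PHY side agrees.
 *
 *	if (priv->phydev->link) {
 *		// true: the PHY may stop the xMII clock during LPI
 *		priv->eee_active = phy_init_eee(priv->phydev, true) >= 0;
 *		if (priv->eee_active)
 *			my_mac_enable_lpi(priv);	// assumed helper
 *	}
 */
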
/**
 * phy_get_eee_err - report the EEE wake error count
 * @phydev: target phy_device struct
 *
 * Description: it reports the number of times the PHY
 * failed to complete its normal wake sequence.
 */
int phy_get_eee_err(struct phy_device *phydev)
{
	return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR,
				     MDIO_MMD_PCS, phydev->addr);
}
EXPORT_SYMBOL(phy_get_eee_err);

/**
 * phy_ethtool_get_eee - get EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it reports the Supported/Advertisement/LP Advertisement
 * capabilities.
 */
int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val;

	/* Get Supported EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
				    MDIO_MMD_PCS, phydev->addr);
	if (val < 0)
		return val;
	data->supported = mmd_eee_cap_to_ethtool_sup_t(val);

	/* Get advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	/* Get LP advertisement EEE */
	val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
				    MDIO_MMD_AN, phydev->addr);
	if (val < 0)
		return val;
	data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_get_eee);

/**
 * phy_ethtool_set_eee - set EEE supported and status
 * @phydev: target phy_device struct
 * @data: ethtool_eee data
 *
 * Description: it programs the Advertisement EEE register.
 */
int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
{
	int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);

	phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
			       phydev->addr, val);

	return 0;
}
EXPORT_SYMBOL(phy_ethtool_set_eee);

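/* Usage sketch (illustrative; the driver and priv names are assumptions):
 * these two helpers map directly onto the ethtool .get_eee/.set_eee hooks.
 *
 *	static int my_mac_get_eee(struct net_device *dev,
 *				  struct ethtool_eee *edata)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_get_eee(priv->phydev, edata);
 *	}
 *
 *	static int my_mac_set_eee(struct net_device *dev,
 *				  struct ethtool_eee *edata)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_set_eee(priv->phydev, edata);
 *	}
 */
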
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->set_wol)
		return phydev->drv->set_wol(phydev, wol);

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(phy_ethtool_set_wol);

void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
	if (phydev->drv->get_wol)
		phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);
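
/* Usage sketch (illustrative; the driver and priv names are assumptions):
 * drivers whose PHY handles Wake-on-LAN can delegate the ethtool WoL
 * hooks to these helpers.
 *
 *	static void my_mac_get_wol(struct net_device *dev,
 *				   struct ethtool_wolinfo *wol)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		wol->supported = 0;
 *		wol->wolopts = 0;
 *		phy_ethtool_get_wol(priv->phydev, wol);
 *	}
 *
 *	static int my_mac_set_wol(struct net_device *dev,
 *				  struct ethtool_wolinfo *wol)
 *	{
 *		struct my_mac_priv *priv = netdev_priv(dev);
 *
 *		return phy_ethtool_set_wol(priv->phydev, wol);
 *	}
 */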