// SPDX-License-Identifier: GPL-2.0-only
/* aQuantia Corporation Network Driver
 * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
 */
8 bool aq_mdio_busy_wait(struct aq_hw_s
*aq_hw
)
13 err
= readx_poll_timeout_atomic(hw_atl_mdio_busy_get
, aq_hw
,
14 val
, val
== 0U, 10U, 100000U);
22 u16
aq_mdio_read_word(struct aq_hw_s
*aq_hw
, u16 mmd
, u16 addr
)
24 u16 phy_addr
= aq_hw
->phy_id
<< 5 | mmd
;
26 /* Set Address register. */
27 hw_atl_glb_mdio_iface4_set(aq_hw
, (addr
& HW_ATL_MDIO_ADDRESS_MSK
) <<
28 HW_ATL_MDIO_ADDRESS_SHIFT
);
29 /* Send Address command. */
30 hw_atl_glb_mdio_iface2_set(aq_hw
, HW_ATL_MDIO_EXECUTE_OPERATION_MSK
|
31 (3 << HW_ATL_MDIO_OP_MODE_SHIFT
) |
32 ((phy_addr
& HW_ATL_MDIO_PHY_ADDRESS_MSK
) <<
33 HW_ATL_MDIO_PHY_ADDRESS_SHIFT
));
35 aq_mdio_busy_wait(aq_hw
);
37 /* Send Read command. */
38 hw_atl_glb_mdio_iface2_set(aq_hw
, HW_ATL_MDIO_EXECUTE_OPERATION_MSK
|
39 (1 << HW_ATL_MDIO_OP_MODE_SHIFT
) |
40 ((phy_addr
& HW_ATL_MDIO_PHY_ADDRESS_MSK
) <<
41 HW_ATL_MDIO_PHY_ADDRESS_SHIFT
));
43 aq_mdio_busy_wait(aq_hw
);
45 return (u16
)hw_atl_glb_mdio_iface5_get(aq_hw
);
48 void aq_mdio_write_word(struct aq_hw_s
*aq_hw
, u16 mmd
, u16 addr
, u16 data
)
50 u16 phy_addr
= aq_hw
->phy_id
<< 5 | mmd
;
52 /* Set Address register. */
53 hw_atl_glb_mdio_iface4_set(aq_hw
, (addr
& HW_ATL_MDIO_ADDRESS_MSK
) <<
54 HW_ATL_MDIO_ADDRESS_SHIFT
);
55 /* Send Address command. */
56 hw_atl_glb_mdio_iface2_set(aq_hw
, HW_ATL_MDIO_EXECUTE_OPERATION_MSK
|
57 (3 << HW_ATL_MDIO_OP_MODE_SHIFT
) |
58 ((phy_addr
& HW_ATL_MDIO_PHY_ADDRESS_MSK
) <<
59 HW_ATL_MDIO_PHY_ADDRESS_SHIFT
));
61 aq_mdio_busy_wait(aq_hw
);
63 hw_atl_glb_mdio_iface3_set(aq_hw
, (data
& HW_ATL_MDIO_WRITE_DATA_MSK
) <<
64 HW_ATL_MDIO_WRITE_DATA_SHIFT
);
65 /* Send Write command. */
66 hw_atl_glb_mdio_iface2_set(aq_hw
, HW_ATL_MDIO_EXECUTE_OPERATION_MSK
|
67 (2 << HW_ATL_MDIO_OP_MODE_SHIFT
) |
68 ((phy_addr
& HW_ATL_MDIO_PHY_ADDRESS_MSK
) <<
69 HW_ATL_MDIO_PHY_ADDRESS_SHIFT
));
71 aq_mdio_busy_wait(aq_hw
);
74 u16
aq_phy_read_reg(struct aq_hw_s
*aq_hw
, u16 mmd
, u16 address
)
79 err
= readx_poll_timeout_atomic(hw_atl_sem_mdio_get
, aq_hw
,
80 val
, val
== 1U, 10U, 100000U);
87 err
= aq_mdio_read_word(aq_hw
, mmd
, address
);
89 hw_atl_reg_glb_cpu_sem_set(aq_hw
, 1U, HW_ATL_FW_SM_MDIO
);
95 void aq_phy_write_reg(struct aq_hw_s
*aq_hw
, u16 mmd
, u16 address
, u16 data
)
100 err
= readx_poll_timeout_atomic(hw_atl_sem_mdio_get
, aq_hw
,
101 val
, val
== 1U, 10U, 100000U);
105 aq_mdio_write_word(aq_hw
, mmd
, address
, data
);
106 hw_atl_reg_glb_cpu_sem_set(aq_hw
, 1U, HW_ATL_FW_SM_MDIO
);
109 bool aq_phy_init_phy_id(struct aq_hw_s
*aq_hw
)
113 for (aq_hw
->phy_id
= 0; aq_hw
->phy_id
< HW_ATL_PHY_ID_MAX
;
115 /* PMA Standard Device Identifier 2: Address 1.3 */
116 val
= aq_phy_read_reg(aq_hw
, MDIO_MMD_PMAPMD
, 3);
125 bool aq_phy_init(struct aq_hw_s
*aq_hw
)
129 if (aq_hw
->phy_id
== HW_ATL_PHY_ID_MAX
)
130 if (!aq_phy_init_phy_id(aq_hw
))
133 /* PMA Standard Device Identifier:
137 dev_id
= aq_phy_read_reg(aq_hw
, MDIO_MMD_PMAPMD
, 2);
139 dev_id
|= aq_phy_read_reg(aq_hw
, MDIO_MMD_PMAPMD
, 3);
141 if (dev_id
== 0xffffffff) {
142 aq_hw
->phy_id
= HW_ATL_PHY_ID_MAX
;