/* drivers/net/phy/nxp-c45-tja11xx.c
 * (extracted from blob ade544bc007d25278b74643e7f6f2183c4c8b124;
 * unrelated page title "drm/rockchip: Don't change hdmi reference clock
 * rate" was web-viewer residue, not part of this file)
 */
1 // SPDX-License-Identifier: GPL-2.0
2 /* NXP C45 PHY driver
3 * Copyright 2021-2023 NXP
4 * Author: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
5 */
7 #include <linux/delay.h>
8 #include <linux/ethtool.h>
9 #include <linux/ethtool_netlink.h>
10 #include <linux/kernel.h>
11 #include <linux/mii.h>
12 #include <linux/module.h>
13 #include <linux/of.h>
14 #include <linux/phy.h>
15 #include <linux/processor.h>
16 #include <linux/property.h>
17 #include <linux/ptp_classify.h>
18 #include <linux/net_tstamp.h>
20 #include "nxp-c45-tja11xx.h"
/* PHY IDs of the supported NXP TJA11xx C45 devices. */
#define PHY_ID_TJA_1103			0x001BB010
#define PHY_ID_TJA_1120			0x001BB031

/* Device-level control register and bits. */
#define VEND1_DEVICE_CONTROL		0x0040
#define DEVICE_CONTROL_RESET		BIT(15)
#define DEVICE_CONTROL_CONFIG_GLOBAL_EN	BIT(14)
#define DEVICE_CONTROL_CONFIG_ALL_EN	BIT(13)

#define VEND1_DEVICE_CONFIG		0x0048

/* TJA1120-specific external timestamp mode register. */
#define TJA1120_VEND1_EXT_TS_MODE	0x1012

/* TJA1120 global infrastructure IRQ registers. */
#define TJA1120_GLOBAL_INFRA_IRQ_ACK	0x2C08
#define TJA1120_GLOBAL_INFRA_IRQ_EN	0x2C0A
#define TJA1120_GLOBAL_INFRA_IRQ_STATUS	0x2C0C
#define TJA1120_DEV_BOOT_DONE		BIT(1)

#define TJA1120_VEND1_PTP_TRIG_DATA_S	0x1070

/* TJA1120 egress timestamp registers and flags. */
#define TJA1120_EGRESS_TS_DATA_S	0x9060
#define TJA1120_EGRESS_TS_END		0x9067
#define TJA1120_TS_VALID		BIT(0)
#define TJA1120_MORE_TS			BIT(15)

/* PHY IRQ ack/enable/status registers. */
#define VEND1_PHY_IRQ_ACK		0x80A0
#define VEND1_PHY_IRQ_EN		0x80A1
#define VEND1_PHY_IRQ_STATUS		0x80A2
#define PHY_IRQ_LINK_EVENT		BIT(1)

#define VEND1_ALWAYS_ACCESSIBLE		0x801F
#define FUSA_PASS			BIT(4)

/* PHY control/config registers. */
#define VEND1_PHY_CONTROL		0x8100
#define PHY_CONFIG_EN			BIT(14)
#define PHY_START_OP			BIT(0)

#define VEND1_PHY_CONFIG		0x8108
#define PHY_CONFIG_AUTO			BIT(0)

#define TJA1120_EPHY_RESETS		0x810A
#define EPHY_PCS_RESET			BIT(3)

/* Signal quality indicator (SQI). */
#define VEND1_SIGNAL_QUALITY		0x8320
#define SQI_VALID			BIT(14)
#define SQI_MASK			GENMASK(2, 0)
#define MAX_SQI				SQI_MASK

/* Cable test control bits and result codes. */
#define CABLE_TEST_ENABLE		BIT(15)
#define CABLE_TEST_START		BIT(14)
#define CABLE_TEST_OK			0x00
#define CABLE_TEST_SHORTED		0x01
#define CABLE_TEST_OPEN			0x02
#define CABLE_TEST_UNKNOWN		0x07

/* Port control and abilities. */
#define VEND1_PORT_CONTROL		0x8040
#define PORT_CONTROL_EN			BIT(14)

#define VEND1_PORT_ABILITIES		0x8046
#define MACSEC_ABILITY			BIT(5)
#define PTP_ABILITY			BIT(3)

#define VEND1_PORT_FUNC_IRQ_EN		0x807A
#define MACSEC_IRQS			BIT(5)
#define PTP_IRQS			BIT(3)

#define VEND1_PTP_IRQ_ACK		0x9008
#define EGR_TS_IRQ			BIT(1)

#define VEND1_PORT_INFRA_CONTROL	0xAC00
#define PORT_INFRA_CONTROL_EN		BIT(14)

/* RGMII RX/TX internal delay (ID) control. */
#define VEND1_RXID			0xAFCC
#define VEND1_TXID			0xAFCD
#define ID_ENABLE			BIT(15)

/* MII interface-mode abilities reported by the PHY. */
#define VEND1_ABILITIES			0xAFC4
#define RGMII_ID_ABILITY		BIT(15)
#define RGMII_ABILITY			BIT(14)
#define RMII_ABILITY			BIT(10)
#define REVMII_ABILITY			BIT(9)
#define MII_ABILITY			BIT(8)
#define SGMII_ABILITY			BIT(0)

/* MII basic configuration values. */
#define VEND1_MII_BASIC_CONFIG		0xAFC6
#define MII_BASIC_CONFIG_REV		BIT(4)
#define MII_BASIC_CONFIG_SGMII		0x9
#define MII_BASIC_CONFIG_RGMII		0x7
#define MII_BASIC_CONFIG_RMII		0x5
#define MII_BASIC_CONFIG_MII		0x4

/* Statistics counters and monitor configuration. */
#define VEND1_SYMBOL_ERROR_CNT_XTD	0x8351
#define EXTENDED_CNT_EN			BIT(15)
#define VEND1_MONITOR_STATUS		0xAC80
#define MONITOR_RESET			BIT(15)
#define VEND1_MONITOR_CONFIG		0xAC86
#define LOST_FRAMES_CNT_EN		BIT(9)
#define ALL_FRAMES_CNT_EN		BIT(8)

#define VEND1_SYMBOL_ERROR_COUNTER	0x8350
#define VEND1_LINK_DROP_COUNTER		0x8352
#define VEND1_LINK_LOSSES_AND_FAILURES	0x8353
#define VEND1_RX_PREAMBLE_COUNT		0xAFCE
#define VEND1_TX_PREAMBLE_COUNT		0xAFCF
#define VEND1_RX_IPG_LENGTH		0xAFD0
#define VEND1_TX_IPG_LENGTH		0xAFD1
#define COUNTER_EN			BIT(15)

/* PTP configuration. */
#define VEND1_PTP_CONFIG		0x1102
#define EXT_TRG_EDGE			BIT(1)

#define TJA1120_SYNC_TRIG_FILTER	0x1010
#define PTP_TRIG_RISE_TS		BIT(3)
#define PTP_TRIG_FALLING_TS		BIT(2)

/* LTC rate-adjust load/direction bits (high half of the subns value). */
#define CLK_RATE_ADJ_LD			BIT(15)
#define CLK_RATE_ADJ_DIR		BIT(14)

/* RX timestamp insertion control. */
#define VEND1_RX_TS_INSRT_CTRL		0x114D
#define TJA1103_RX_TS_INSRT_MODE2	0x02

#define TJA1120_RX_TS_INSRT_CTRL	0x9012
#define TJA1120_RX_TS_INSRT_EN		BIT(15)
#define TJA1120_TS_INSRT_MODE		BIT(4)

/* Egress timestamp ring registers. */
#define VEND1_EGR_RING_DATA_0		0x114E
#define VEND1_EGR_RING_CTRL		0x1154

#define RING_DATA_0_TS_VALID		BIT(15)

#define RING_DONE			BIT(0)

/* Seconds bits carried alongside a HW timestamp; see
 * nxp_c45_reconstruct_ts().
 */
#define TS_SEC_MASK			GENMASK(1, 0)

#define PTP_ENABLE			BIT(3)
#define PHY_TEST_ENABLE			BIT(0)

#define VEND1_PORT_PTP_CONTROL		0x9000
#define PORT_PTP_CONTROL_BYPASS		BIT(11)

/* PTP clock period in ns for 100BASE-T1 and 1000BASE-T1 link modes. */
#define PTP_CLK_PERIOD_100BT1		15ULL
#define PTP_CLK_PERIOD_1000BT1		8ULL

/* PTP event message filter values. */
#define EVENT_MSG_FILT_ALL		0x0F
#define EVENT_MSG_FILT_NONE		0x00

/* GPIO function configuration; one register per pin, indexed from the
 * base address.
 */
#define VEND1_GPIO_FUNC_CONFIG_BASE	0x2C40
#define GPIO_FUNC_EN			BIT(15)
#define GPIO_FUNC_PTP			BIT(6)
#define GPIO_SIGNAL_PTP_TRIGGER		0x01
#define GPIO_SIGNAL_PPS_OUT		0x12
#define GPIO_DISABLE			0
#define GPIO_PPS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP |	\
					 GPIO_SIGNAL_PPS_OUT)
#define GPIO_EXTTS_OUT_CFG		(GPIO_FUNC_EN | GPIO_FUNC_PTP |	\
					 GPIO_SIGNAL_PTP_TRIGGER)

/* RGMII delay tuning constants, in picoseconds. */
#define RGMII_PERIOD_PS			8000U
#define PS_PER_DEGREE			div_u64(RGMII_PERIOD_PS, 360)
#define MIN_ID_PS			1644U
#define MAX_ID_PS			2260U
#define DEFAULT_ID_PS			2000U

/* Convert a ppb frequency offset into the HW sub-ns increment value. */
#define PPM_TO_SUBNS_INC(ppb, ptp_clk_period)	div_u64(GENMASK_ULL(31, 0) * \
							(ppb) * \
							(ptp_clk_period), \
							NSEC_PER_SEC)

/* Per-skb driver state stored in skb->cb. */
#define NXP_C45_SKB_CB(skb)	((struct nxp_c45_skb_cb *)(skb)->cb)

#define TJA11XX_REVERSE_MODE	BIT(0)

struct nxp_c45_phy;
/* Per-skb control block, accessed through NXP_C45_SKB_CB(). */
struct nxp_c45_skb_cb {
	struct ptp_header *header;	/* parsed PTP header inside the skb */
	unsigned int type;		/* packet type as passed to the
					 * timestamping callbacks
					 */
};
/* Build a struct nxp_c45_reg_field compound literal describing a bit
 * field inside an MMD register.
 */
#define NXP_C45_REG_FIELD(_reg, _devad, _offset, _size) \
	((struct nxp_c45_reg_field) {			\
		.reg = _reg,				\
		.devad = _devad,			\
		.offset = _offset,			\
		.size = _size,				\
	})
/* Location of a bit field inside an MMD register. */
struct nxp_c45_reg_field {
	u16 reg;	/* register address */
	u8 devad;	/* MMD device address */
	u8 offset;	/* bit offset of the field's LSB */
	u8 size;	/* field width in bits; 0 is invalid */
};
/* Hardware timestamp as read back from the PHY, together with the PTP
 * header fields used to match it against a queued skb.
 */
struct nxp_c45_hwts {
	u32 nsec;		/* nanoseconds part of the timestamp */
	u32 sec;		/* partial seconds part (low bits only) */
	u8 domain_number;	/* PTP domain of the timestamped frame */
	u16 sequence_id;	/* PTP sequence id of the timestamped frame */
	u8 msg_type;		/* PTP message type of the timestamped frame */
};
/* Per-model register map: TJA1103 and TJA1120 place the same logical
 * PTP registers and fields at different addresses, so each model
 * provides one of these (fetched via nxp_c45_get_regmap()).
 */
struct nxp_c45_regmap {
	/* PTP config regs. */
	u16 vend1_ptp_clk_period;
	u16 vend1_event_msg_filt;

	/* LTC bits and regs. */
	struct nxp_c45_reg_field ltc_read;
	struct nxp_c45_reg_field ltc_write;
	struct nxp_c45_reg_field ltc_lock_ctrl;
	u16 vend1_ltc_wr_nsec_0;
	u16 vend1_ltc_wr_nsec_1;
	u16 vend1_ltc_wr_sec_0;
	u16 vend1_ltc_wr_sec_1;
	u16 vend1_ltc_rd_nsec_0;
	u16 vend1_ltc_rd_nsec_1;
	u16 vend1_ltc_rd_sec_0;
	u16 vend1_ltc_rd_sec_1;
	u16 vend1_rate_adj_subns_0;
	u16 vend1_rate_adj_subns_1;

	/* External trigger reg fields. */
	struct nxp_c45_reg_field irq_egr_ts_en;
	struct nxp_c45_reg_field irq_egr_ts_status;
	struct nxp_c45_reg_field domain_number;
	struct nxp_c45_reg_field msg_type;
	struct nxp_c45_reg_field sequence_id;
	struct nxp_c45_reg_field sec_1_0;
	struct nxp_c45_reg_field sec_4_2;
	struct nxp_c45_reg_field nsec_15_0;
	struct nxp_c45_reg_field nsec_29_16;

	/* PPS and EXT Trigger bits and regs. */
	struct nxp_c45_reg_field pps_enable;
	struct nxp_c45_reg_field pps_polarity;
	u16 vend1_ext_trg_data_0;
	u16 vend1_ext_trg_data_1;
	u16 vend1_ext_trg_data_2;
	u16 vend1_ext_trg_data_3;
	u16 vend1_ext_trg_ctrl;

	/* Cable test reg fields. */
	u16 cable_test;
	struct nxp_c45_reg_field cable_test_valid;
	struct nxp_c45_reg_field cable_test_result;
};
/* One ethtool statistic: its name and the reg field holding the value. */
struct nxp_c45_phy_stats {
	const char *name;
	const struct nxp_c45_reg_field counter;
};
/* Per-model driver data attached to the phy_driver match entry and
 * retrieved with nxp_c45_get_data().
 */
struct nxp_c45_phy_data {
	const struct nxp_c45_regmap *regmap;	/* model register layout */
	const struct nxp_c45_phy_stats *stats;	/* model-specific counters */
	int n_stats;				/* number of entries in @stats */
	u8 ptp_clk_period;			/* PTP clock period, in ns */
	bool ext_ts_both_edges;			/* HW timestamps both edges */
	bool ack_ptp_irq;			/* PTP IRQ needs explicit ack */
	void (*counters_enable)(struct phy_device *phydev);
	/* Fetch one egress timestamp; returns false when none is valid. */
	bool (*get_egressts)(struct nxp_c45_phy *priv,
			     struct nxp_c45_hwts *hwts);
	/* Fetch one external-trigger timestamp; false when none is valid. */
	bool (*get_extts)(struct nxp_c45_phy *priv, struct timespec64 *extts);
	void (*ptp_init)(struct phy_device *phydev);
	void (*ptp_enable)(struct phy_device *phydev, bool enable);
	void (*nmi_handler)(struct phy_device *phydev,
			    irqreturn_t *irq_status);
};
/* Return the per-model data registered in the matched phy_driver entry. */
static const
struct nxp_c45_phy_data *nxp_c45_get_data(struct phy_device *phydev)
{
	return phydev->drv->driver_data;
}
/* Shortcut to the model-specific register map. */
static const
struct nxp_c45_regmap *nxp_c45_get_regmap(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);

	return phy_data->regmap;
}
303 static int nxp_c45_read_reg_field(struct phy_device *phydev,
304 const struct nxp_c45_reg_field *reg_field)
306 u16 mask;
307 int ret;
309 if (reg_field->size == 0) {
310 phydev_err(phydev, "Trying to read a reg field of size 0.\n");
311 return -EINVAL;
314 ret = phy_read_mmd(phydev, reg_field->devad, reg_field->reg);
315 if (ret < 0)
316 return ret;
318 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
319 GENMASK(reg_field->offset + reg_field->size - 1,
320 reg_field->offset);
321 ret &= mask;
322 ret >>= reg_field->offset;
324 return ret;
327 static int nxp_c45_write_reg_field(struct phy_device *phydev,
328 const struct nxp_c45_reg_field *reg_field,
329 u16 val)
331 u16 mask;
332 u16 set;
334 if (reg_field->size == 0) {
335 phydev_err(phydev, "Trying to write a reg field of size 0.\n");
336 return -EINVAL;
339 mask = reg_field->size == 1 ? BIT(reg_field->offset) :
340 GENMASK(reg_field->offset + reg_field->size - 1,
341 reg_field->offset);
342 set = val << reg_field->offset;
344 return phy_modify_mmd_changed(phydev, reg_field->devad,
345 reg_field->reg, mask, set);
348 static int nxp_c45_set_reg_field(struct phy_device *phydev,
349 const struct nxp_c45_reg_field *reg_field)
351 if (reg_field->size != 1) {
352 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
353 return -EINVAL;
356 return nxp_c45_write_reg_field(phydev, reg_field, 1);
359 static int nxp_c45_clear_reg_field(struct phy_device *phydev,
360 const struct nxp_c45_reg_field *reg_field)
362 if (reg_field->size != 1) {
363 phydev_err(phydev, "Trying to set a reg field of size different than 1.\n");
364 return -EINVAL;
367 return nxp_c45_write_reg_field(phydev, reg_field, 0);
370 static bool nxp_c45_poll_txts(struct phy_device *phydev)
372 return phydev->irq <= 0;
/* Read the PHY's local time counter (LTC). Caller must hold ptp_lock
 * (see nxp_c45_ptp_gettimex64()). Setting the ltc_read field before the
 * four 16-bit reads presumably latches the running counter so the halves
 * are coherent — the hardware sequencing is relied on here.
 * @sts is accepted for the gettimex64 signature but not used.
 */
static int _nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
				   struct timespec64 *ts,
				   struct ptp_system_timestamp *sts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_read);
	ts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_nsec_0);
	ts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				    regmap->vend1_ltc_rd_nsec_1) << 16;
	ts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				  regmap->vend1_ltc_rd_sec_0);
	ts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				   regmap->vend1_ltc_rd_sec_1) << 16;

	return 0;
}
395 static int nxp_c45_ptp_gettimex64(struct ptp_clock_info *ptp,
396 struct timespec64 *ts,
397 struct ptp_system_timestamp *sts)
399 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
401 mutex_lock(&priv->ptp_lock);
402 _nxp_c45_ptp_gettimex64(ptp, ts, sts);
403 mutex_unlock(&priv->ptp_lock);
405 return 0;
/* Write the LTC. Caller must hold ptp_lock (see nxp_c45_ptp_settime64()).
 * The new time is staged as four 16-bit halves and then committed by
 * setting the ltc_write field last.
 */
static int _nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
				  const struct timespec64 *ts)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_0,
		      ts->tv_nsec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_nsec_1,
		      ts->tv_nsec >> 16);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_0,
		      ts->tv_sec);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, regmap->vend1_ltc_wr_sec_1,
		      ts->tv_sec >> 16);
	/* Commit the staged value. */
	nxp_c45_set_reg_field(priv->phydev, &regmap->ltc_write);

	return 0;
}
427 static int nxp_c45_ptp_settime64(struct ptp_clock_info *ptp,
428 const struct timespec64 *ts)
430 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
432 mutex_lock(&priv->ptp_lock);
433 _nxp_c45_ptp_settime64(ptp, ts);
434 mutex_unlock(&priv->ptp_lock);
436 return 0;
/* Adjust the LTC frequency. The signed ppb offset is converted to an
 * absolute sub-ns increment (PPM_TO_SUBNS_INC); the low 16 bits are
 * written first, then the high bits together with the load bit — plus
 * the direction bit for a positive offset — which commits the change.
 */
static int nxp_c45_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	const struct nxp_c45_regmap *regmap = data->regmap;
	s32 ppb = scaled_ppm_to_ppb(scaled_ppm);
	u64 subns_inc_val;
	bool inc;

	mutex_lock(&priv->ptp_lock);
	inc = ppb >= 0;
	ppb = abs(ppb);

	subns_inc_val = PPM_TO_SUBNS_INC(ppb, data->ptp_clk_period);

	/* Low 16 bits of the increment. */
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_0,
		      subns_inc_val);
	subns_inc_val >>= 16;
	/* High bits plus load (and direction) flags commit the change. */
	subns_inc_val |= CLK_RATE_ADJ_LD;
	if (inc)
		subns_inc_val |= CLK_RATE_ADJ_DIR;

	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_rate_adj_subns_1,
		      subns_inc_val);
	mutex_unlock(&priv->ptp_lock);

	return 0;
}
470 static int nxp_c45_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
472 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
473 struct timespec64 now, then;
475 mutex_lock(&priv->ptp_lock);
476 then = ns_to_timespec64(delta);
477 _nxp_c45_ptp_gettimex64(ptp, &now, NULL);
478 now = timespec64_add(now, then);
479 _nxp_c45_ptp_settime64(ptp, &now);
480 mutex_unlock(&priv->ptp_lock);
482 return 0;
/* Rebuild a full timespec64 from a hardware timestamp that carries only
 * nanoseconds plus the low TS_SEC_MASK bits of the seconds. @ts must
 * hold the current LTC time on entry; its low seconds bits are replaced
 * by the hardware ones, stepping back one TS_SEC_MASK+1 epoch when the
 * low bits wrapped between the timestamp and the LTC read.
 */
static void nxp_c45_reconstruct_ts(struct timespec64 *ts,
				   struct nxp_c45_hwts *hwts)
{
	ts->tv_nsec = hwts->nsec;
	/* LTC low bits already wrapped past the timestamp's low bits. */
	if ((ts->tv_sec & TS_SEC_MASK) < (hwts->sec & TS_SEC_MASK))
		ts->tv_sec -= TS_SEC_MASK + 1;
	ts->tv_sec &= ~TS_SEC_MASK;
	ts->tv_sec |= hwts->sec & TS_SEC_MASK;
}
495 static bool nxp_c45_match_ts(struct ptp_header *header,
496 struct nxp_c45_hwts *hwts,
497 unsigned int type)
499 return ntohs(header->sequence_id) == hwts->sequence_id &&
500 ptp_get_msgtype(header, type) == hwts->msg_type &&
501 header->domain_number == hwts->domain_number;
/* Read the latched external-trigger timestamp as four 16-bit halves,
 * then write RING_DONE to the trigger control register (presumably to
 * release the slot for the next event — TODO confirm against datasheet).
 * Always reports success; validity checks are done by the TJA1120
 * wrapper where the hardware provides a valid flag.
 */
static bool nxp_c45_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);

	extts->tv_nsec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				      regmap->vend1_ext_trg_data_0);
	extts->tv_nsec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				       regmap->vend1_ext_trg_data_1) << 16;
	extts->tv_sec = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_2);
	extts->tv_sec |= phy_read_mmd(priv->phydev, MDIO_MMD_VEND1,
				     regmap->vend1_ext_trg_data_3) << 16;
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1,
		      regmap->vend1_ext_trg_ctrl, RING_DONE);

	return true;
}
523 static bool tja1120_extts_is_valid(struct phy_device *phydev)
525 bool valid;
526 int reg;
528 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
529 TJA1120_VEND1_PTP_TRIG_DATA_S);
530 valid = !!(reg & TJA1120_TS_VALID);
532 return valid;
/* TJA1120 variant of get_extts: only read the timestamp when the valid
 * flag is set, working around an engineering-sample quirk where a
 * pending FIFO entry must first be moved into the data buffer.
 * Returns true when @extts was filled with a valid timestamp.
 */
static bool tja1120_get_extts(struct nxp_c45_phy *priv,
			      struct timespec64 *extts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   regmap->vend1_ext_trg_ctrl);
	more_ts = !!(reg & TJA1120_MORE_TS);

	valid = tja1120_extts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_extts_out;

		/* Bug workaround for TJA1120 engineering samples: move the new
		 * timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      regmap->vend1_ext_trg_ctrl, RING_DONE);
		valid = tja1120_extts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_extts_out;
	}

	nxp_c45_get_extts(priv, extts);
tja1120_get_extts_out:
	return valid;
}
/* Read one egress timestamp from the PHY, assembling the nanoseconds
 * from two fields (bits 15:0 and 29:16) and the partial seconds from
 * two fields (bits 1:0 and 4:2), alongside the PTP matching keys.
 */
static void nxp_c45_read_egress_ts(struct nxp_c45_phy *priv,
				   struct nxp_c45_hwts *hwts)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;

	hwts->domain_number =
		nxp_c45_read_reg_field(phydev, &regmap->domain_number);
	hwts->msg_type =
		nxp_c45_read_reg_field(phydev, &regmap->msg_type);
	hwts->sequence_id =
		nxp_c45_read_reg_field(phydev, &regmap->sequence_id);
	hwts->nsec =
		nxp_c45_read_reg_field(phydev, &regmap->nsec_15_0);
	hwts->nsec |=
		nxp_c45_read_reg_field(phydev, &regmap->nsec_29_16) << 16;
	hwts->sec = nxp_c45_read_reg_field(phydev, &regmap->sec_1_0);
	hwts->sec |= nxp_c45_read_reg_field(phydev, &regmap->sec_4_2) << 2;
}
/* TJA1103 egress-timestamp fetch: advance the egress ring (RING_DONE),
 * then read the timestamp only if the data register flags it valid.
 * Runs under ptp_lock so ring accesses are not interleaved.
 * Returns true when @hwts was filled.
 */
static bool nxp_c45_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	phy_write_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_CTRL,
		      RING_DONE);
	reg = phy_read_mmd(priv->phydev, MDIO_MMD_VEND1, VEND1_EGR_RING_DATA_0);
	valid = !!(reg & RING_DATA_0_TS_VALID);
	if (!valid)
		goto nxp_c45_get_hwtxts_out;

	nxp_c45_read_egress_ts(priv, hwts);
nxp_c45_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
608 static bool tja1120_egress_ts_is_valid(struct phy_device *phydev)
610 bool valid;
611 u16 reg;
613 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S);
614 valid = !!(reg & TJA1120_TS_VALID);
616 return valid;
/* TJA1120 egress-timestamp fetch, with the engineering-sample FIFO
 * workaround, done under ptp_lock. A consumed timestamp's valid flag is
 * cleared afterwards so the slot can be reused.
 * Returns true when @hwts was filled.
 */
static bool tja1120_get_hwtxts(struct nxp_c45_phy *priv,
			       struct nxp_c45_hwts *hwts)
{
	struct phy_device *phydev = priv->phydev;
	bool more_ts;
	bool valid;
	u16 reg;

	mutex_lock(&priv->ptp_lock);
	reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_END);
	more_ts = !!(reg & TJA1120_MORE_TS);
	valid = tja1120_egress_ts_is_valid(phydev);
	if (!valid) {
		if (!more_ts)
			goto tja1120_get_hwtxts_out;

		/* Bug workaround for TJA1120 engineering samples: move the
		 * new timestamp from the FIFO to the buffer.
		 */
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_EGRESS_TS_END, TJA1120_TS_VALID);
		valid = tja1120_egress_ts_is_valid(phydev);
		if (!valid)
			goto tja1120_get_hwtxts_out;
	}
	nxp_c45_read_egress_ts(priv, hwts);
	/* Mark the entry consumed. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, TJA1120_EGRESS_TS_DATA_S,
			   TJA1120_TS_VALID);
tja1120_get_hwtxts_out:
	mutex_unlock(&priv->ptp_lock);
	return valid;
}
/* Deliver one egress timestamp: find the queued skb whose PTP header
 * matches @txts (walking tx_queue under its lock), reconstruct the full
 * time from the current LTC, and complete the skb's TX timestamp. A
 * timestamp with no matching skb is only warned about.
 */
static void nxp_c45_process_txts(struct nxp_c45_phy *priv,
				 struct nxp_c45_hwts *txts)
{
	struct sk_buff *skb, *tmp, *skb_match = NULL;
	struct skb_shared_hwtstamps shhwtstamps;
	struct timespec64 ts;
	unsigned long flags;
	bool ts_match;
	s64 ts_ns;

	/* Unlink the matching skb while holding the queue lock. */
	spin_lock_irqsave(&priv->tx_queue.lock, flags);
	skb_queue_walk_safe(&priv->tx_queue, skb, tmp) {
		ts_match = nxp_c45_match_ts(NXP_C45_SKB_CB(skb)->header, txts,
					    NXP_C45_SKB_CB(skb)->type);
		if (!ts_match)
			continue;
		skb_match = skb;
		__skb_unlink(skb, &priv->tx_queue);
		break;
	}
	spin_unlock_irqrestore(&priv->tx_queue.lock, flags);

	if (skb_match) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		nxp_c45_reconstruct_ts(&ts, txts);
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		ts_ns = timespec64_to_ns(&ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ts_ns);
		skb_complete_tx_timestamp(skb_match, &shhwtstamps);
	} else {
		phydev_warn(priv->phydev,
			    "the tx timestamp doesn't match with any skb\n");
	}
}
/* PTP aux worker, three phases:
 * 1) in polling mode, drain egress timestamps and match them to queued
 *    TX skbs;
 * 2) complete RX timestamps: the partial HW time was stashed in the PTP
 *    header's reserved2 field (2 bits seconds + 30 bits ns) and is
 *    reconstructed against the current LTC before netif_rx();
 * 3) if external timestamping is armed, poll for a new trigger event
 *    and post a PTP_CLOCK_EXTTS event when the timestamp changed.
 * Returns 1 to reschedule (work pending / extts armed), -1 otherwise.
 */
static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
{
	struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	bool poll_txts = nxp_c45_poll_txts(priv->phydev);
	struct skb_shared_hwtstamps *shhwtstamps_rx;
	struct ptp_clock_event event;
	struct nxp_c45_hwts hwts;
	bool reschedule = false;
	struct timespec64 ts;
	struct sk_buff *skb;
	bool ts_valid;
	u32 ts_raw;

	while (!skb_queue_empty_lockless(&priv->tx_queue) && poll_txts) {
		ts_valid = data->get_egressts(priv, &hwts);
		if (unlikely(!ts_valid)) {
			/* Still more skbs in the queue */
			reschedule = true;
			break;
		}

		nxp_c45_process_txts(priv, &hwts);
	}

	while ((skb = skb_dequeue(&priv->rx_queue)) != NULL) {
		nxp_c45_ptp_gettimex64(&priv->caps, &ts, NULL);
		ts_raw = __be32_to_cpu(NXP_C45_SKB_CB(skb)->header->reserved2);
		hwts.sec = ts_raw >> 30;
		hwts.nsec = ts_raw & GENMASK(29, 0);
		nxp_c45_reconstruct_ts(&ts, &hwts);
		shhwtstamps_rx = skb_hwtstamps(skb);
		shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
		/* Scrub the field the hardware used for the raw timestamp. */
		NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
		netif_rx(skb);
	}

	if (priv->extts) {
		ts_valid = data->get_extts(priv, &ts);
		/* Only report a trigger once: compare with the last one. */
		if (ts_valid && timespec64_compare(&ts, &priv->extts_ts) != 0) {
			priv->extts_ts = ts;
			event.index = priv->extts_index;
			event.type = PTP_CLOCK_EXTTS;
			event.timestamp = ns_to_ktime(timespec64_to_ns(&ts));
			ptp_clock_event(priv->ptp_clock, &event);
		}
		reschedule = true;
	}

	return reschedule ? 1 : -1;
}
739 static void nxp_c45_gpio_config(struct nxp_c45_phy *priv,
740 int pin, u16 pin_cfg)
742 struct phy_device *phydev = priv->phydev;
744 phy_write_mmd(phydev, MDIO_MMD_VEND1,
745 VEND1_GPIO_FUNC_CONFIG_BASE + pin, pin_cfg);
/* Enable/disable the periodic output (PPS) on the pin assigned to
 * PTP_PF_PEROUT. Only a 1 s period with zero start time is supported;
 * the optional PTP_PEROUT_PHASE flag allows a 0 or 500 ms phase, mapped
 * onto the pps_polarity field. Returns 0, a negative pin-lookup error,
 * -EOPNOTSUPP for unsupported flags, or -EINVAL for bad parameters.
 */
static int nxp_c45_perout_enable(struct nxp_c45_phy *priv,
				 struct ptp_perout_request *perout, int on)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(priv->phydev);
	struct phy_device *phydev = priv->phydev;
	int pin;

	if (perout->flags & ~PTP_PEROUT_PHASE)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_PEROUT, perout->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_enable);
		nxp_c45_clear_reg_field(priv->phydev,
					&regmap->pps_polarity);

		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);

		return 0;
	}

	/* The PPS signal is fixed to 1 second and is always generated when the
	 * seconds counter is incremented. The start time is not configurable.
	 * If the clock is adjusted, the PPS signal is automatically readjusted.
	 */
	if (perout->period.sec != 1 || perout->period.nsec != 0) {
		phydev_warn(phydev, "The period can be set only to 1 second.");
		return -EINVAL;
	}

	if (!(perout->flags & PTP_PEROUT_PHASE)) {
		if (perout->start.sec != 0 || perout->start.nsec != 0) {
			phydev_warn(phydev, "The start time is not configurable. Should be set to 0 seconds and 0 nanoseconds.");
			return -EINVAL;
		}
	} else {
		if (perout->phase.nsec != 0 &&
		    perout->phase.nsec != (NSEC_PER_SEC >> 1)) {
			phydev_warn(phydev, "The phase can be set only to 0 or 500000000 nanoseconds.");
			return -EINVAL;
		}

		/* A 500 ms phase is realized by inverting the polarity. */
		if (perout->phase.nsec == 0)
			nxp_c45_clear_reg_field(priv->phydev,
						&regmap->pps_polarity);
		else
			nxp_c45_set_reg_field(priv->phydev,
					      &regmap->pps_polarity);
	}

	nxp_c45_gpio_config(priv, pin, GPIO_PPS_OUT_CFG);

	nxp_c45_set_reg_field(priv->phydev, &regmap->pps_enable);

	return 0;
}
/* Single-edge selection (TJA1103-style): PTP_RISING_EDGE clears
 * EXT_TRG_EDGE, PTP_FALLING_EDGE sets it. If both flags are given, the
 * falling-edge write runs last and wins.
 */
static void nxp_c45_set_rising_or_falling(struct phy_device *phydev,
					  struct ptp_extts_request *extts)
{
	if (extts->flags & PTP_RISING_EDGE)
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PTP_CONFIG, EXT_TRG_EDGE);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PTP_CONFIG, EXT_TRG_EDGE);
}
/* Independent edge selection (TJA1120-style): rising and falling
 * timestamping are enabled/disabled separately via the sync trigger
 * filter bits.
 */
static void nxp_c45_set_rising_and_falling(struct phy_device *phydev,
					   struct ptp_extts_request *extts)
{
	/* PTP_EXTTS_REQUEST may have only the PTP_ENABLE_FEATURE flag set. In
	 * this case external ts will be enabled on rising edge.
	 */
	if (extts->flags & PTP_RISING_EDGE ||
	    extts->flags == PTP_ENABLE_FEATURE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_RISE_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_RISE_TS);

	if (extts->flags & PTP_FALLING_EDGE)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_SYNC_TRIG_FILTER,
				 PTP_TRIG_FALLING_TS);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_SYNC_TRIG_FILTER,
				   PTP_TRIG_FALLING_TS);
}
/* Enable/disable external timestamping on the pin assigned to
 * PTP_PF_EXTTS. Edge selection is delegated to the model-appropriate
 * helper; enabling also kicks the aux worker, which polls for trigger
 * events (see nxp_c45_do_aux_work()). Returns 0, a negative pin-lookup
 * error, or -EOPNOTSUPP for unsupported flag combinations.
 */
static int nxp_c45_extts_enable(struct nxp_c45_phy *priv,
				struct ptp_extts_request *extts, int on)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(priv->phydev);
	int pin;

	if (extts->flags & ~(PTP_ENABLE_FEATURE |
			      PTP_RISING_EDGE |
			      PTP_FALLING_EDGE |
			      PTP_STRICT_FLAGS))
		return -EOPNOTSUPP;

	/* Sampling on both edges is not supported */
	if ((extts->flags & PTP_RISING_EDGE) &&
	    (extts->flags & PTP_FALLING_EDGE) &&
	    !data->ext_ts_both_edges)
		return -EOPNOTSUPP;

	pin = ptp_find_pin(priv->ptp_clock, PTP_PF_EXTTS, extts->index);
	if (pin < 0)
		return pin;

	if (!on) {
		nxp_c45_gpio_config(priv, pin, GPIO_DISABLE);
		priv->extts = false;

		return 0;
	}

	if (data->ext_ts_both_edges)
		nxp_c45_set_rising_and_falling(priv->phydev, extts);
	else
		nxp_c45_set_rising_or_falling(priv->phydev, extts);

	nxp_c45_gpio_config(priv, pin, GPIO_EXTTS_OUT_CFG);
	priv->extts = true;
	priv->extts_index = extts->index;
	/* Start polling for trigger events right away. */
	ptp_schedule_worker(priv->ptp_clock, 0);

	return 0;
}
889 static int nxp_c45_ptp_enable(struct ptp_clock_info *ptp,
890 struct ptp_clock_request *req, int on)
892 struct nxp_c45_phy *priv = container_of(ptp, struct nxp_c45_phy, caps);
894 switch (req->type) {
895 case PTP_CLK_REQ_EXTTS:
896 return nxp_c45_extts_enable(priv, &req->extts, on);
897 case PTP_CLK_REQ_PEROUT:
898 return nxp_c45_perout_enable(priv, &req->perout, on);
899 default:
900 return -EOPNOTSUPP;
/* The 12 configurable GPIO pins, all unassigned (PTP_PF_NONE) by
 * default; user space picks functions via the ptp pin interface.
 */
static struct ptp_pin_desc nxp_c45_ptp_pins[] = {
	{ "nxp_c45_gpio0", 0, PTP_PF_NONE},
	{ "nxp_c45_gpio1", 1, PTP_PF_NONE},
	{ "nxp_c45_gpio2", 2, PTP_PF_NONE},
	{ "nxp_c45_gpio3", 3, PTP_PF_NONE},
	{ "nxp_c45_gpio4", 4, PTP_PF_NONE},
	{ "nxp_c45_gpio5", 5, PTP_PF_NONE},
	{ "nxp_c45_gpio6", 6, PTP_PF_NONE},
	{ "nxp_c45_gpio7", 7, PTP_PF_NONE},
	{ "nxp_c45_gpio8", 8, PTP_PF_NONE},
	{ "nxp_c45_gpio9", 9, PTP_PF_NONE},
	{ "nxp_c45_gpio10", 10, PTP_PF_NONE},
	{ "nxp_c45_gpio11", 11, PTP_PF_NONE},
};
919 static int nxp_c45_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
920 enum ptp_pin_function func, unsigned int chan)
922 if (pin >= ARRAY_SIZE(nxp_c45_ptp_pins))
923 return -EINVAL;
925 switch (func) {
926 case PTP_PF_NONE:
927 case PTP_PF_PEROUT:
928 case PTP_PF_EXTTS:
929 break;
930 default:
931 return -EOPNOTSUPP;
934 return 0;
/* Fill in the ptp_clock_info capabilities and register the PTP clock.
 * ptp_clock_register() returns NULL when PTP support is disabled, which
 * is mapped to -ENOMEM here. Returns 0 on success.
 */
static int nxp_c45_init_ptp_clock(struct nxp_c45_phy *priv)
{
	priv->caps = (struct ptp_clock_info) {
		.owner = THIS_MODULE,
		.name = "NXP C45 PHC",
		.max_adj = 16666666,
		.adjfine = nxp_c45_ptp_adjfine,
		.adjtime = nxp_c45_ptp_adjtime,
		.gettimex64 = nxp_c45_ptp_gettimex64,
		.settime64 = nxp_c45_ptp_settime64,
		.enable = nxp_c45_ptp_enable,
		.verify = nxp_c45_ptp_verify_pin,
		.do_aux_work = nxp_c45_do_aux_work,
		.pin_config = nxp_c45_ptp_pins,
		.n_pins = ARRAY_SIZE(nxp_c45_ptp_pins),
		.n_ext_ts = 1,
		.n_per_out = 1,
	};

	priv->ptp_clock = ptp_clock_register(&priv->caps,
					     &priv->phydev->mdio.dev);

	if (IS_ERR(priv->ptp_clock))
		return PTR_ERR(priv->ptp_clock);

	if (!priv->ptp_clock)
		return -ENOMEM;

	return 0;
}
/* mii_timestamper txtstamp callback. When TX timestamping is on, stash
 * the parsed PTP header in the skb cb, queue the skb until its egress
 * timestamp arrives, and in polling mode kick the aux worker. The skb
 * is owned by this function: it is either queued or freed.
 */
static void nxp_c45_txtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	switch (priv->hwts_tx) {
	case HWTSTAMP_TX_ON:
		NXP_C45_SKB_CB(skb)->type = type;
		NXP_C45_SKB_CB(skb)->header = ptp_parse_header(skb, type);
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		skb_queue_tail(&priv->tx_queue, skb);
		if (nxp_c45_poll_txts(priv->phydev))
			ptp_schedule_worker(priv->ptp_clock, 0);
		break;
	case HWTSTAMP_TX_OFF:
	default:
		kfree_skb(skb);
		break;
	}
}
/* mii_timestamper rxtstamp callback. If the frame has a PTP header and
 * RX timestamping is on, queue the skb for the aux worker (which fills
 * in the timestamp and calls netif_rx()) and return true to take
 * ownership; otherwise return false so the stack delivers it normally.
 */
static bool nxp_c45_rxtstamp(struct mii_timestamper *mii_ts,
			     struct sk_buff *skb, int type)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct ptp_header *header = ptp_parse_header(skb, type);

	if (!header)
		return false;

	if (!priv->hwts_rx)
		return false;

	NXP_C45_SKB_CB(skb)->header = header;
	skb_queue_tail(&priv->rx_queue, skb);
	ptp_schedule_worker(priv->ptp_clock, 0);

	return true;
}
/* mii_timestamper hwtstamp callback: apply a timestamping config.
 * Any V2 L2 rx filter is widened to HWTSTAMP_FILTER_PTP_V2_L2_EVENT.
 * The event message filter and model ptp_enable hook track whether any
 * timestamping is active; the egress-timestamp IRQ is only (un)armed
 * when the PHY has a usable interrupt line. Returns 0 or -ERANGE for
 * unsupported tx_type/rx_filter values.
 */
static int nxp_c45_hwtstamp(struct mii_timestamper *mii_ts,
			    struct kernel_hwtstamp_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);
	struct phy_device *phydev = priv->phydev;
	const struct nxp_c45_phy_data *data;

	if (cfg->tx_type < 0 || cfg->tx_type > HWTSTAMP_TX_ON)
		return -ERANGE;

	data = nxp_c45_get_data(phydev);
	priv->hwts_tx = cfg->tx_type;

	switch (cfg->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->hwts_rx = 0;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		priv->hwts_rx = 1;
		cfg->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (priv->hwts_rx || priv->hwts_tx) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_ALL);
		data->ptp_enable(phydev, true);
	} else {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      data->regmap->vend1_event_msg_filt,
			      EVENT_MSG_FILT_NONE);
		data->ptp_enable(phydev, false);
	}

	/* No egress-timestamp IRQ to manage in polling mode. */
	if (nxp_c45_poll_txts(priv->phydev))
		goto nxp_c45_no_ptp_irq;

	if (priv->hwts_tx)
		nxp_c45_set_reg_field(phydev, &data->regmap->irq_egr_ts_en);
	else
		nxp_c45_clear_reg_field(phydev, &data->regmap->irq_egr_ts_en);

nxp_c45_no_ptp_irq:
	return 0;
}
/* Report timestamping capabilities for ethtool --show-time-stamping:
 * HW TX/RX/raw timestamping, the associated PHC index, and the
 * supported tx types and V2-over-L2 rx filters.
 */
static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
			   struct kernel_ethtool_ts_info *ts_info)
{
	struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
						mii_ts);

	ts_info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				   SOF_TIMESTAMPING_RX_HARDWARE |
				   SOF_TIMESTAMPING_RAW_HARDWARE;
	ts_info->phc_index = ptp_clock_index(priv->ptp_clock);
	ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
	ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
			      (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT);

	return 0;
}
1082 static const struct nxp_c45_phy_stats common_hw_stats[] = {
1083 { "phy_link_status_drop_cnt",
1084 NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 8, 6), },
1085 { "phy_link_availability_drop_cnt",
1086 NXP_C45_REG_FIELD(0x8352, MDIO_MMD_VEND1, 0, 6), },
1087 { "phy_link_loss_cnt",
1088 NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 10, 6), },
1089 { "phy_link_failure_cnt",
1090 NXP_C45_REG_FIELD(0x8353, MDIO_MMD_VEND1, 0, 10), },
1091 { "phy_symbol_error_cnt",
1092 NXP_C45_REG_FIELD(0x8350, MDIO_MMD_VEND1, 0, 16) },
1095 static const struct nxp_c45_phy_stats tja1103_hw_stats[] = {
1096 { "rx_preamble_count",
1097 NXP_C45_REG_FIELD(0xAFCE, MDIO_MMD_VEND1, 0, 6), },
1098 { "tx_preamble_count",
1099 NXP_C45_REG_FIELD(0xAFCF, MDIO_MMD_VEND1, 0, 6), },
1100 { "rx_ipg_length",
1101 NXP_C45_REG_FIELD(0xAFD0, MDIO_MMD_VEND1, 0, 9), },
1102 { "tx_ipg_length",
1103 NXP_C45_REG_FIELD(0xAFD1, MDIO_MMD_VEND1, 0, 9), },
/* TJA1120-specific counters. The "_xtd" entries are the extended (high)
 * parts of the 16-bit frame counters that follow them.
 */
static const struct nxp_c45_phy_stats tja1120_hw_stats[] = {
	{ "phy_symbol_error_cnt_ext",
	  NXP_C45_REG_FIELD(0x8351, MDIO_MMD_VEND1, 0, 14) },
	{ "tx_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA1, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_frames",
	  NXP_C45_REG_FIELD(0xACA0, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA3, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_frames",
	  NXP_C45_REG_FIELD(0xACA2, MDIO_MMD_VEND1, 0, 16), },
	{ "tx_lost_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA5, MDIO_MMD_VEND1, 0, 8), },
	{ "tx_lost_frames",
	  NXP_C45_REG_FIELD(0xACA4, MDIO_MMD_VEND1, 0, 16), },
	{ "rx_lost_frames_xtd",
	  NXP_C45_REG_FIELD(0xACA7, MDIO_MMD_VEND1, 0, 8), },
	{ "rx_lost_frames",
	  NXP_C45_REG_FIELD(0xACA6, MDIO_MMD_VEND1, 0, 16), },
};
1127 static int nxp_c45_get_sset_count(struct phy_device *phydev)
1129 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1131 return ARRAY_SIZE(common_hw_stats) + (phy_data ? phy_data->n_stats : 0);
1134 static void nxp_c45_get_strings(struct phy_device *phydev, u8 *data)
1136 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1137 size_t count = nxp_c45_get_sset_count(phydev);
1138 size_t idx;
1139 size_t i;
1141 for (i = 0; i < count; i++) {
1142 if (i < ARRAY_SIZE(common_hw_stats)) {
1143 ethtool_puts(&data, common_hw_stats[i].name);
1144 continue;
1146 idx = i - ARRAY_SIZE(common_hw_stats);
1147 ethtool_puts(&data, phy_data->stats[idx].name);
1151 static void nxp_c45_get_stats(struct phy_device *phydev,
1152 struct ethtool_stats *stats, u64 *data)
1154 const struct nxp_c45_phy_data *phy_data = nxp_c45_get_data(phydev);
1155 size_t count = nxp_c45_get_sset_count(phydev);
1156 const struct nxp_c45_reg_field *reg_field;
1157 size_t idx;
1158 size_t i;
1159 int ret;
1161 for (i = 0; i < count; i++) {
1162 if (i < ARRAY_SIZE(common_hw_stats)) {
1163 reg_field = &common_hw_stats[i].counter;
1164 } else {
1165 idx = i - ARRAY_SIZE(common_hw_stats);
1166 reg_field = &phy_data->stats[idx].counter;
1169 ret = nxp_c45_read_reg_field(phydev, reg_field);
1170 if (ret < 0)
1171 data[i] = U64_MAX;
1172 else
1173 data[i] = ret;
/* Open the configuration window: enable global/all config access on the
 * device, then enable the port, PHY and port-infra config blocks.
 * The write order and the settle delay are hardware requirements —
 * do not reorder.
 */
static int nxp_c45_config_enable(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
		      DEVICE_CONTROL_CONFIG_GLOBAL_EN |
		      DEVICE_CONTROL_CONFIG_ALL_EN);
	/* Let the device-control write take effect before touching the
	 * per-port enables.
	 */
	usleep_range(400, 450);

	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_CONTROL,
		      PORT_CONTROL_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
		      PHY_CONFIG_EN);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_INFRA_CONTROL,
		      PORT_INFRA_CONTROL_EN);

	return 0;
}
/* Kick the PHY out of configuration mode and start normal operation. */
static int nxp_c45_start_op(struct phy_device *phydev)
{
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONTROL,
				PHY_START_OP);
}
/* Common interrupt (un)masking: MACsec IRQs and the link-event IRQ are
 * enabled or disabled together depending on phydev->interrupts.
 */
static int nxp_c45_config_intr(struct phy_device *phydev)
{
	int ret;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				       VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
		if (ret)
			return ret;

		return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
					VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
	}

	ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_FUNC_IRQ_EN, MACSEC_IRQS);
	if (ret)
		return ret;

	return phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				  VEND1_PHY_IRQ_EN, PHY_IRQ_LINK_EVENT);
}
/* TJA1103 interrupt setup: acknowledge any pending FUSA indication first
 * (it cannot be masked on this part), then apply the common IRQ config.
 */
static int tja1103_config_intr(struct phy_device *phydev)
{
	int ret;

	/* We can't disable the FUSA IRQ for TJA1103, but we can clean it up. */
	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_ALWAYS_ACCESSIBLE,
			    FUSA_PASS);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
/* TJA1120 interrupt setup: (un)mask the boot-done infra IRQ in addition
 * to the common link/MACsec interrupts.
 */
static int tja1120_config_intr(struct phy_device *phydev)
{
	int ret;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
		ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				       TJA1120_GLOBAL_INFRA_IRQ_EN,
				       TJA1120_DEV_BOOT_DONE);
	else
		ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
					 TJA1120_GLOBAL_INFRA_IRQ_EN,
					 TJA1120_DEV_BOOT_DONE);
	if (ret)
		return ret;

	return nxp_c45_config_intr(phydev);
}
/* Threaded IRQ handler. Services, in order: link events, egress-timestamp
 * availability, then the chip-specific NMI and MACsec sources. Each source
 * that fires upgrades ret to IRQ_HANDLED.
 */
static irqreturn_t nxp_c45_handle_interrupt(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);
	struct nxp_c45_phy *priv = phydev->priv;
	irqreturn_t ret = IRQ_NONE;
	struct nxp_c45_hwts hwts;
	int irq;

	irq = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_STATUS);
	if (irq & PHY_IRQ_LINK_EVENT) {
		/* Ack the link event before re-running the PHY state machine. */
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_IRQ_ACK,
			      PHY_IRQ_LINK_EVENT);
		phy_trigger_machine(phydev);
		ret = IRQ_HANDLED;
	}

	irq = nxp_c45_read_reg_field(phydev, &data->regmap->irq_egr_ts_status);
	if (irq) {
		/* If ack_ptp_irq is false, the IRQ bit is self-clear and will
		 * be cleared when the EGR TS FIFO is empty. Otherwise, the
		 * IRQ bit should be cleared before reading the timestamp,
		 */
		if (data->ack_ptp_irq)
			phy_write_mmd(phydev, MDIO_MMD_VEND1,
				      VEND1_PTP_IRQ_ACK, EGR_TS_IRQ);
		/* Drain every pending egress timestamp from the FIFO. */
		while (data->get_egressts(priv, &hwts))
			nxp_c45_process_txts(priv, &hwts);

		ret = IRQ_HANDLED;
	}

	data->nmi_handler(phydev, &ret);
	nxp_c45_handle_macsec_interrupt(phydev, &ret);

	return ret;
}
/* Trigger a device soft reset and poll (20 ms period, 240 ms timeout)
 * until the self-clearing reset bit deasserts.
 */
static int nxp_c45_soft_reset(struct phy_device *phydev)
{
	int ret;

	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONTROL,
			    DEVICE_CONTROL_RESET);
	if (ret)
		return ret;

	return phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
					 VEND1_DEVICE_CONTROL, ret,
					 !(ret & DEVICE_CONTROL_RESET), 20000,
					 240000, false);
}
/* Enter test mode and kick off a cable diagnostic; results are collected
 * later via nxp_c45_cable_test_get_status().
 */
static int nxp_c45_cable_test_start(struct phy_device *phydev)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
			 VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);
	return phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
				CABLE_TEST_ENABLE | CABLE_TEST_START);
}
/* Poll the cable-test result. While the valid bit is clear the test is
 * still running (*finished = false). Once valid, translate the hardware
 * result code to the ethtool netlink code, leave test mode and restart
 * normal PHY operation.
 */
static int nxp_c45_cable_test_get_status(struct phy_device *phydev,
					 bool *finished)
{
	const struct nxp_c45_regmap *regmap = nxp_c45_get_regmap(phydev);
	int ret;
	u8 cable_test_result;

	ret = nxp_c45_read_reg_field(phydev, &regmap->cable_test_valid);
	if (!ret) {
		*finished = false;
		return 0;
	}

	*finished = true;
	cable_test_result = nxp_c45_read_reg_field(phydev,
						   &regmap->cable_test_result);

	switch (cable_test_result) {
	case CABLE_TEST_OK:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OK);
		break;
	case CABLE_TEST_SHORTED:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT);
		break;
	case CABLE_TEST_OPEN:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_OPEN);
		break;
	default:
		ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
					ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC);
	}

	/* Tear down test mode and resume normal operation. */
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, regmap->cable_test,
			   CABLE_TEST_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_PORT_FUNC_ENABLES, PHY_TEST_ENABLE);

	return nxp_c45_start_op(phydev);
}
1359 static int nxp_c45_get_sqi(struct phy_device *phydev)
1361 int reg;
1363 reg = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_SIGNAL_QUALITY);
1364 if (!(reg & SQI_VALID))
1365 return -EINVAL;
1367 reg &= SQI_MASK;
1369 return reg;
static void tja1120_link_change_notify(struct phy_device *phydev)
{
	/* Bug workaround for TJA1120 engineering samples: fix egress
	 * timestamps lost after link recovery. Pulse the EPHY PCS reset
	 * whenever the link drops.
	 */
	if (phydev->state == PHY_NOLINK) {
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   TJA1120_EPHY_RESETS, EPHY_PCS_RESET);
	}
}
/* Maximum SQI value reportable by the hardware (3-bit field). */
static int nxp_c45_get_sqi_max(struct phy_device *phydev)
{
	return MAX_SQI;
}
1390 static int nxp_c45_check_delay(struct phy_device *phydev, u32 delay)
1392 if (delay < MIN_ID_PS) {
1393 phydev_err(phydev, "delay value smaller than %u\n", MIN_ID_PS);
1394 return -EINVAL;
1397 if (delay > MAX_ID_PS) {
1398 phydev_err(phydev, "delay value higher than %u\n", MAX_ID_PS);
1399 return -EINVAL;
1402 return 0;
/* Enable the common link-drop counter, then the chip-specific counters
 * via the driver-data hook.
 */
static void nxp_c45_counters_enable(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_LINK_DROP_COUNTER,
			 COUNTER_EN);

	data->counters_enable(phydev);
}
/* Program the PTP clock period, unlock the local-time counter, then run
 * the chip-specific PTP init hook.
 */
static void nxp_c45_ptp_init(struct phy_device *phydev)
{
	const struct nxp_c45_phy_data *data = nxp_c45_get_data(phydev);

	phy_write_mmd(phydev, MDIO_MMD_VEND1,
		      data->regmap->vend1_ptp_clk_period,
		      data->ptp_clk_period);
	/* Clear the LTC lock so the local time counter can be written. */
	nxp_c45_clear_reg_field(phydev, &data->regmap->ltc_lock_ctrl);

	data->ptp_init(phydev);
}
1427 static u64 nxp_c45_get_phase_shift(u64 phase_offset_raw)
1429 /* The delay in degree phase is 73.8 + phase_offset_raw * 0.9.
1430 * To avoid floating point operations we'll multiply by 10
1431 * and get 1 decimal point precision.
1433 phase_offset_raw *= 10;
1434 phase_offset_raw -= 738;
1435 return div_u64(phase_offset_raw, 9);
/* Turn off the RGMII TX and RX internal delay lines. */
static void nxp_c45_disable_delays(struct phy_device *phydev)
{
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID, ID_ENABLE);
	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID, ID_ENABLE);
}
/* Apply the internal delays cached in priv->tx_delay/rx_delay: enable the
 * TX delay line for rgmii-id/rgmii-txid and the RX delay line for
 * rgmii-id/rgmii-rxid, converting picoseconds to the raw phase value;
 * otherwise disable the respective delay line.
 */
static void nxp_c45_set_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u64 tx_delay = priv->tx_delay;
	u64 rx_delay = priv->rx_delay;
	u64 degree;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		degree = div_u64(tx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TXID,
				   ID_ENABLE);
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		degree = div_u64(rx_delay, PS_PER_DEGREE);
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
			      ID_ENABLE | nxp_c45_get_phase_shift(degree));
	} else {
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RXID,
				   ID_ENABLE);
	}
}
/* Read tx-/rx-internal-delay-ps from firmware properties into priv,
 * falling back to DEFAULT_ID_PS when a property is absent, and validate
 * each value against the supported range. Only the delays relevant to
 * the current RGMII variant are consulted.
 */
static int nxp_c45_get_delays(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	int ret;

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "tx-internal-delay-ps",
					       &priv->tx_delay);
		if (ret)
			priv->tx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->tx_delay);
		if (ret) {
			phydev_err(phydev,
				   "tx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
	    phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
		ret = device_property_read_u32(&phydev->mdio.dev,
					       "rx-internal-delay-ps",
					       &priv->rx_delay);
		if (ret)
			priv->rx_delay = DEFAULT_ID_PS;

		ret = nxp_c45_check_delay(phydev, priv->rx_delay);
		if (ret) {
			phydev_err(phydev,
				   "rx-internal-delay-ps invalid value\n");
			return ret;
		}
	}

	return 0;
}
/* Configure the MII basic config register for the requested MAC interface
 * mode, after checking the mode against the abilities the PHY reports in
 * VEND1_ABILITIES. Returns -EINVAL for unsupported/unknown modes.
 */
static int nxp_c45_set_phy_mode(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;
	u16 basic_config;
	int ret;

	/* ret holds the abilities bitmap for the per-mode checks below. */
	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_ABILITIES);
	phydev_dbg(phydev, "Clause 45 managed PHY abilities 0x%x\n", ret);

	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_RGMII:
		if (!(ret & RGMII_ABILITY)) {
			phydev_err(phydev, "rgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		nxp_c45_disable_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
		if (!(ret & RGMII_ID_ABILITY)) {
			phydev_err(phydev, "rgmii-id, rgmii-txid, rgmii-rxid modes are not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_RGMII);
		ret = nxp_c45_get_delays(phydev);
		if (ret)
			return ret;

		nxp_c45_set_delays(phydev);
		break;
	case PHY_INTERFACE_MODE_MII:
		if (!(ret & MII_ABILITY)) {
			phydev_err(phydev, "mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII);
		break;
	case PHY_INTERFACE_MODE_REVMII:
		if (!(ret & REVMII_ABILITY)) {
			phydev_err(phydev, "rev-mii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_MII | MII_BASIC_CONFIG_REV);
		break;
	case PHY_INTERFACE_MODE_RMII:
		if (!(ret & RMII_ABILITY)) {
			phydev_err(phydev, "rmii mode not supported\n");
			return -EINVAL;
		}

		basic_config = MII_BASIC_CONFIG_RMII;

		/* This is not PHY_INTERFACE_MODE_REVRMII */
		if (priv->flags & TJA11XX_REVERSE_MODE)
			basic_config |= MII_BASIC_CONFIG_REV;

		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      basic_config);
		break;
	case PHY_INTERFACE_MODE_SGMII:
		if (!(ret & SGMII_ABILITY)) {
			phydev_err(phydev, "sgmii mode not supported\n");
			return -EINVAL;
		}
		phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_MII_BASIC_CONFIG,
			      MII_BASIC_CONFIG_SGMII);
		break;
	case PHY_INTERFACE_MODE_INTERNAL:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* Main .config_init: open the config window, apply the SJA1110 rev B
 * workaround, select auto PHY config, program the MAC interface mode,
 * enable counters and PTP, set up MACsec, then start operation.
 */
static int nxp_c45_config_init(struct phy_device *phydev)
{
	int ret;

	ret = nxp_c45_config_enable(phydev);
	if (ret) {
		phydev_err(phydev, "Failed to enable config\n");
		return ret;
	}

	/* Bug workaround for SJA1110 rev B: enable write access
	 * to MDIO_MMD_PMAPMD
	 */
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F8, 1);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x01F9, 2);

	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PHY_CONFIG,
			 PHY_CONFIG_AUTO);

	ret = nxp_c45_set_phy_mode(phydev);
	if (ret)
		return ret;

	/* 100/1000BASE-T1 has no autonegotiation. */
	phydev->autoneg = AUTONEG_DISABLE;

	nxp_c45_counters_enable(phydev);
	nxp_c45_ptp_init(phydev);
	ret = nxp_c45_macsec_config_init(phydev);
	if (ret)
		return ret;

	return nxp_c45_start_op(phydev);
}
1628 static int nxp_c45_get_features(struct phy_device *phydev)
1630 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, phydev->supported);
1631 linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, phydev->supported);
1633 return genphy_c45_pma_read_abilities(phydev);
1636 static int nxp_c45_parse_dt(struct phy_device *phydev)
1638 struct device_node *node = phydev->mdio.dev.of_node;
1639 struct nxp_c45_phy *priv = phydev->priv;
1641 if (!IS_ENABLED(CONFIG_OF_MDIO))
1642 return 0;
1644 if (of_property_read_bool(node, "nxp,rmii-refclk-out"))
1645 priv->flags |= TJA11XX_REVERSE_MODE;
1647 return 0;
/* Driver probe: allocate per-PHY state, read the port abilities and wire
 * up PTP timestamping and MACsec only when both the silicon and the
 * kernel config support them.
 */
static int nxp_c45_probe(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv;
	bool macsec_ability;
	int phy_abilities;
	bool ptp_ability;
	int ret = 0;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->rx_queue);

	priv->phydev = phydev;

	phydev->priv = priv;

	nxp_c45_parse_dt(phydev);

	mutex_init(&priv->ptp_lock);

	phy_abilities = phy_read_mmd(phydev, MDIO_MMD_VEND1,
				     VEND1_PORT_ABILITIES);
	ptp_ability = !!(phy_abilities & PTP_ABILITY);
	if (!ptp_ability) {
		phydev_dbg(phydev, "the phy does not support PTP");
		goto no_ptp_support;
	}

	if (IS_ENABLED(CONFIG_PTP_1588_CLOCK) &&
	    IS_ENABLED(CONFIG_NETWORK_PHY_TIMESTAMPING)) {
		priv->mii_ts.rxtstamp = nxp_c45_rxtstamp;
		priv->mii_ts.txtstamp = nxp_c45_txtstamp;
		priv->mii_ts.hwtstamp = nxp_c45_hwtstamp;
		priv->mii_ts.ts_info = nxp_c45_ts_info;
		phydev->mii_ts = &priv->mii_ts;
		/* NOTE(review): ret is not checked here and may be
		 * overwritten below by nxp_c45_macsec_probe() — confirm
		 * a PTP clock init failure is intentionally non-fatal.
		 */
		ret = nxp_c45_init_ptp_clock(priv);

		/* Timestamp selected by default to keep legacy API */
		phydev->default_timestamp = true;
	} else {
		phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
	}

no_ptp_support:
	macsec_ability = !!(phy_abilities & MACSEC_ABILITY);
	if (!macsec_ability) {
		phydev_info(phydev, "the phy does not support MACsec\n");
		goto no_macsec_support;
	}

	if (IS_ENABLED(CONFIG_MACSEC)) {
		ret = nxp_c45_macsec_probe(phydev);
		phydev_dbg(phydev, "MACsec support enabled.");
	} else {
		phydev_dbg(phydev, "MACsec support not enabled even if the phy supports it");
	}

no_macsec_support:

	return ret;
}
/* Driver remove: unregister the PTP clock (if one was created), drop any
 * queued timestamping skbs and tear down MACsec state. priv itself is
 * devm-managed.
 */
static void nxp_c45_remove(struct phy_device *phydev)
{
	struct nxp_c45_phy *priv = phydev->priv;

	if (priv->ptp_clock)
		ptp_clock_unregister(priv->ptp_clock);

	skb_queue_purge(&priv->tx_queue);
	skb_queue_purge(&priv->rx_queue);
	nxp_c45_macsec_remove(phydev);
}
/* Enable the TJA1103-specific preamble and IPG-length counters. */
static void tja1103_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_PREAMBLE_COUNT,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_IPG_LENGTH,
			 COUNTER_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_TX_IPG_LENGTH,
			 COUNTER_EN);
}
/* TJA1103 PTP setup: select RX timestamp insert mode 2 and enable the
 * PTP port function.
 */
static void tja1103_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, VEND1_RX_TS_INSRT_CTRL,
		      TJA1103_RX_TS_INSRT_MODE2);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_PORT_FUNC_ENABLES,
			 PTP_ENABLE);
}
1747 static void tja1103_ptp_enable(struct phy_device *phydev, bool enable)
1749 if (enable)
1750 phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
1751 VEND1_PORT_PTP_CONTROL,
1752 PORT_PTP_CONTROL_BYPASS);
1753 else
1754 phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
1755 VEND1_PORT_PTP_CONTROL,
1756 PORT_PTP_CONTROL_BYPASS);
/* TJA1103 "NMI" hook: if the unmaskable FUSA indication is pending,
 * acknowledge it (write-one-to-clear) and mark the IRQ handled.
 */
static void tja1103_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   VEND1_ALWAYS_ACCESSIBLE);
	if (ret & FUSA_PASS) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      VEND1_ALWAYS_ACCESSIBLE,
			      FUSA_PASS);
		*irq_status = IRQ_HANDLED;
	}
}
/* Register map for TJA1103: PTP/LTC, egress-timestamp, external trigger
 * and cable-test register addresses and bit fields.
 */
static const struct nxp_c45_regmap tja1103_regmap = {
	.vend1_ptp_clk_period	= 0x1104,
	.vend1_event_msg_filt	= 0x1148,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 3, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1102, MDIO_MMD_VEND1, 2, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1115, MDIO_MMD_VEND1, 0, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 2, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1105, MDIO_MMD_VEND1, 0, 1),
	.vend1_ltc_wr_nsec_0	= 0x1106,
	.vend1_ltc_wr_nsec_1	= 0x1107,
	.vend1_ltc_wr_sec_0	= 0x1108,
	.vend1_ltc_wr_sec_1	= 0x1109,
	.vend1_ltc_rd_nsec_0	= 0x110A,
	.vend1_ltc_rd_nsec_1	= 0x110B,
	.vend1_ltc_rd_sec_0	= 0x110C,
	.vend1_ltc_rd_sec_1	= 0x110D,
	.vend1_rate_adj_subns_0	= 0x110F,
	.vend1_rate_adj_subns_1	= 0x1110,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x1131, MDIO_MMD_VEND1, 0, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x1132, MDIO_MMD_VEND1, 0, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 0, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 8, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x114F, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 14, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x114E, MDIO_MMD_VEND1, 12, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x1150, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x1151, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1121,
	.vend1_ext_trg_data_1	= 0x1122,
	.vend1_ext_trg_data_2	= 0x1123,
	.vend1_ext_trg_data_3	= 0x1124,
	.vend1_ext_trg_ctrl	= 0x1126,
	.cable_test		= 0x8330,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 13, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8330, MDIO_MMD_VEND1, 0, 3),
};
/* TJA1103 driver data: 100BASE-T1 PTP clock period, self-clearing egress
 * timestamp IRQ (no explicit ack), and TJA1103-specific hooks.
 */
static const struct nxp_c45_phy_data tja1103_phy_data = {
	.regmap = &tja1103_regmap,
	.stats = tja1103_hw_stats,
	.n_stats = ARRAY_SIZE(tja1103_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_100BT1,
	.ext_ts_both_edges = false,
	.ack_ptp_irq = false,
	.counters_enable = tja1103_counters_enable,
	.get_egressts = nxp_c45_get_hwtxts,
	.get_extts = nxp_c45_get_extts,
	.ptp_init = tja1103_ptp_init,
	.ptp_enable = tja1103_ptp_enable,
	.nmi_handler = tja1103_nmi_handler,
};
/* Enable the TJA1120-specific counters: extended symbol-error counter,
 * monitor reset, and the all/lost frame counters.
 */
static void tja1120_counters_enable(struct phy_device *phydev)
{
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_SYMBOL_ERROR_CNT_XTD,
			 EXTENDED_CNT_EN);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_STATUS,
			 MONITOR_RESET);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_MONITOR_CONFIG,
			 ALL_FRAMES_CNT_EN | LOST_FRAMES_CNT_EN);
}
/* TJA1120 PTP setup: enable RX timestamp insertion, configure the
 * external-timestamp insert mode, and enable PTP in the device config.
 */
static void tja1120_ptp_init(struct phy_device *phydev)
{
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_RX_TS_INSRT_CTRL,
		      TJA1120_RX_TS_INSRT_EN | TJA1120_TS_INSRT_MODE);
	phy_write_mmd(phydev, MDIO_MMD_VEND1, TJA1120_VEND1_EXT_TS_MODE,
		      TJA1120_TS_INSRT_MODE);
	phy_set_bits_mmd(phydev, MDIO_MMD_VEND1, VEND1_DEVICE_CONFIG,
			 PTP_ENABLE);
}
/* On TJA1120 the PTP block is gated directly by PTP_ENABLE in the port
 * function enables (no bypass bit as on TJA1103).
 */
static void tja1120_ptp_enable(struct phy_device *phydev, bool enable)
{
	if (enable)
		phy_set_bits_mmd(phydev, MDIO_MMD_VEND1,
				 VEND1_PORT_FUNC_ENABLES,
				 PTP_ENABLE);
	else
		phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1,
				   VEND1_PORT_FUNC_ENABLES,
				   PTP_ENABLE);
}
/* TJA1120 "NMI" hook: acknowledge a pending boot-done infra IRQ and mark
 * the interrupt handled.
 */
static void tja1120_nmi_handler(struct phy_device *phydev,
				irqreturn_t *irq_status)
{
	int ret;

	ret = phy_read_mmd(phydev, MDIO_MMD_VEND1,
			   TJA1120_GLOBAL_INFRA_IRQ_STATUS);
	if (ret & TJA1120_DEV_BOOT_DONE) {
		phy_write_mmd(phydev, MDIO_MMD_VEND1,
			      TJA1120_GLOBAL_INFRA_IRQ_ACK,
			      TJA1120_DEV_BOOT_DONE);
		*irq_status = IRQ_HANDLED;
	}
}
/* Register map for TJA1120: same logical fields as tja1103_regmap but at
 * the TJA1120 register addresses/bit positions.
 */
static const struct nxp_c45_regmap tja1120_regmap = {
	.vend1_ptp_clk_period	= 0x1020,
	.vend1_event_msg_filt	= 0x9010,
	.pps_enable		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 4, 1),
	.pps_polarity		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 5, 1),
	.ltc_lock_ctrl		=
		NXP_C45_REG_FIELD(0x1006, MDIO_MMD_VEND1, 2, 1),
	.ltc_read		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 1, 1),
	.ltc_write		=
		NXP_C45_REG_FIELD(0x1000, MDIO_MMD_VEND1, 2, 1),
	.vend1_ltc_wr_nsec_0	= 0x1040,
	.vend1_ltc_wr_nsec_1	= 0x1041,
	.vend1_ltc_wr_sec_0	= 0x1042,
	.vend1_ltc_wr_sec_1	= 0x1043,
	.vend1_ltc_rd_nsec_0	= 0x1048,
	.vend1_ltc_rd_nsec_1	= 0x1049,
	.vend1_ltc_rd_sec_0	= 0x104A,
	.vend1_ltc_rd_sec_1	= 0x104B,
	.vend1_rate_adj_subns_0	= 0x1030,
	.vend1_rate_adj_subns_1	= 0x1031,
	.irq_egr_ts_en		=
		NXP_C45_REG_FIELD(0x900A, MDIO_MMD_VEND1, 1, 1),
	.irq_egr_ts_status	=
		NXP_C45_REG_FIELD(0x900C, MDIO_MMD_VEND1, 1, 1),
	.domain_number		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 8, 8),
	.msg_type		=
		NXP_C45_REG_FIELD(0x9061, MDIO_MMD_VEND1, 4, 4),
	.sequence_id		=
		NXP_C45_REG_FIELD(0x9062, MDIO_MMD_VEND1, 0, 16),
	.sec_1_0		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 0, 2),
	.sec_4_2		=
		NXP_C45_REG_FIELD(0x9065, MDIO_MMD_VEND1, 2, 3),
	.nsec_15_0		=
		NXP_C45_REG_FIELD(0x9063, MDIO_MMD_VEND1, 0, 16),
	.nsec_29_16		=
		NXP_C45_REG_FIELD(0x9064, MDIO_MMD_VEND1, 0, 14),
	.vend1_ext_trg_data_0	= 0x1071,
	.vend1_ext_trg_data_1	= 0x1072,
	.vend1_ext_trg_data_2	= 0x1073,
	.vend1_ext_trg_data_3	= 0x1074,
	.vend1_ext_trg_ctrl	= 0x1075,
	.cable_test		= 0x8360,
	.cable_test_valid	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 15, 1),
	.cable_test_result	=
		NXP_C45_REG_FIELD(0x8361, MDIO_MMD_VEND1, 0, 3),
};
/* TJA1120 driver data: 1000BASE-T1 PTP clock period, external timestamps
 * on both edges, explicit egress-timestamp IRQ ack, and TJA1120 hooks.
 */
static const struct nxp_c45_phy_data tja1120_phy_data = {
	.regmap = &tja1120_regmap,
	.stats = tja1120_hw_stats,
	.n_stats = ARRAY_SIZE(tja1120_hw_stats),
	.ptp_clk_period = PTP_CLK_PERIOD_1000BT1,
	.ext_ts_both_edges = true,
	.ack_ptp_irq = true,
	.counters_enable = tja1120_counters_enable,
	.get_egressts = tja1120_get_hwtxts,
	.get_extts = tja1120_get_extts,
	.ptp_init = tja1120_ptp_init,
	.ptp_enable = tja1120_ptp_enable,
	.nmi_handler = tja1120_nmi_handler,
};
/* phylib driver entries for the two supported PHY models. The entries
 * differ only in driver data, interrupt config and the TJA1120
 * link-change workaround.
 */
static struct phy_driver nxp_c45_driver[] = {
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103),
		.name			= "NXP C45 TJA1103",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1103_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1103_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
	{
		PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120),
		.name			= "NXP C45 TJA1120",
		.get_features		= nxp_c45_get_features,
		.driver_data		= &tja1120_phy_data,
		.probe			= nxp_c45_probe,
		.soft_reset		= nxp_c45_soft_reset,
		.config_aneg		= genphy_c45_config_aneg,
		.config_init		= nxp_c45_config_init,
		.config_intr		= tja1120_config_intr,
		.handle_interrupt	= nxp_c45_handle_interrupt,
		.read_status		= genphy_c45_read_status,
		.link_change_notify	= tja1120_link_change_notify,
		.suspend		= genphy_c45_pma_suspend,
		.resume			= genphy_c45_pma_resume,
		.get_sset_count		= nxp_c45_get_sset_count,
		.get_strings		= nxp_c45_get_strings,
		.get_stats		= nxp_c45_get_stats,
		.cable_test_start	= nxp_c45_cable_test_start,
		.cable_test_get_status	= nxp_c45_cable_test_get_status,
		.set_loopback		= genphy_c45_loopback,
		.get_sqi		= nxp_c45_get_sqi,
		.get_sqi_max		= nxp_c45_get_sqi_max,
		.remove			= nxp_c45_remove,
	},
};

module_phy_driver(nxp_c45_driver);
/* MDIO device ID table for module autoloading. */
static struct mdio_device_id __maybe_unused nxp_c45_tbl[] = {
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1103) },
	{ PHY_ID_MATCH_MODEL(PHY_ID_TJA_1120) },
	{ /*sentinel*/ },
};

MODULE_DEVICE_TABLE(mdio, nxp_c45_tbl);

MODULE_AUTHOR("Radu Pirea <radu-nicolae.pirea@oss.nxp.com>");
MODULE_DESCRIPTION("NXP C45 PHY driver");
MODULE_LICENSE("GPL v2");