treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / net / dsa / sja1105 / sja1105_clocking.c
blob9082e52b55e92b8e489607d0e27b5c17db1934f6
1 // SPDX-License-Identifier: BSD-3-Clause
2 /* Copyright (c) 2016-2018, NXP Semiconductors
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4 */
5 #include <linux/packing.h>
6 #include "sja1105.h"
8 #define SJA1105_SIZE_CGU_CMD 4
/* CFG_PAD_MIIx_TX register layout: output-stage and input-stage
 * configuration of the MII TX pad group (TXD, TX_CTL/TX_ER, TX_CLK).
 * See sja1105_rgmii_cfg_pad_tx_config() for the values written here.
 */
struct sja1105_cfg_pad_mii_tx {
	u64 d32_os;	/* TXD[3:2] output stage */
	u64 d32_ipud;	/* TXD[3:2] input stage */
	u64 d10_os;	/* TXD[1:0] output stage */
	u64 d10_ipud;	/* TXD[1:0] input stage */
	u64 ctrl_os;	/* TX_CTL / TX_ER output stage */
	u64 ctrl_ipud;	/* TX_CTL / TX_ER input stage */
	u64 clk_os;	/* TX_CLK output stage */
	u64 clk_ih;	/* TX_CLK input hysteresis */
	u64 clk_ipud;	/* TX_CLK input stage */
};
/* CFG_PAD_MIIx_ID register layout: controls for the per-port RGMII
 * internal delay (tunable delay lines) on RXC and TXC.
 * See sja1105pqrs_setup_rgmii_delay() for the 2-stage programming sequence.
 */
struct sja1105_cfg_pad_mii_id {
	u64 rxc_stable_ovr;	/* RXC stable override */
	u64 rxc_delay;		/* RXC delay-line tuning value */
	u64 rxc_bypass;		/* Bypass the RXC delay line */
	u64 rxc_pd;		/* Power down the RXC delay line */
	u64 txc_stable_ovr;	/* TXC stable override */
	u64 txc_delay;		/* TXC delay-line tuning value */
	u64 txc_bypass;		/* Bypass the TXC delay line */
	u64 txc_pd;		/* Power down the TXC delay line */
};
/* UM10944 Table 82.
 * IDIV_0_C to IDIV_4_C control registers
 * (addr. 10000Bh to 10000Fh)
 */
struct sja1105_cgu_idiv {
	u64 clksrc;	/* Input clock source selector */
	u64 autoblock;	/* Block the clock automatically while changing clksrc */
	u64 idiv;	/* Divider value minus one (0 => /1, 9 => /10) */
	u64 pd;		/* Power down the divider */
};
/* PLL_1_C control register
 *
 * SJA1105 E/T: UM10944 Table 81 (address 10000Ah)
 * SJA1105 P/Q/R/S: UM11040 Table 116 (address 10000Ah)
 *
 * Field semantics (msel/psel/direct/fbsel/bypass) follow the datasheet
 * tables above; see sja1105_cgu_rmii_pll_config() for the 50 MHz setup
 * values actually used.
 */
struct sja1105_cgu_pll_ctrl {
	u64 pllclksrc;	/* Input clock source for the PLL */
	u64 msel;	/* PLL divider setting (per datasheet) */
	u64 autoblock;	/* Block the clock automatically while reconfiguring */
	u64 psel;	/* PLL divider setting (per datasheet) */
	u64 direct;	/* Direct output (per datasheet) */
	u64 fbsel;	/* Feedback select (per datasheet) */
	u64 bypass;	/* Bypass the PLL */
	u64 pd;		/* Power down the PLL */
};
/* Encodings for the CLKSRC fields of the CGU control registers:
 * per-port MII TX/RX clock pins, the two PLL outputs, and the five
 * per-port integer dividers (IDIV).
 */
enum {
	CLKSRC_MII0_TX_CLK	= 0x00,
	CLKSRC_MII0_RX_CLK	= 0x01,
	CLKSRC_MII1_TX_CLK	= 0x02,
	CLKSRC_MII1_RX_CLK	= 0x03,
	CLKSRC_MII2_TX_CLK	= 0x04,
	CLKSRC_MII2_RX_CLK	= 0x05,
	CLKSRC_MII3_TX_CLK	= 0x06,
	CLKSRC_MII3_RX_CLK	= 0x07,
	CLKSRC_MII4_TX_CLK	= 0x08,
	CLKSRC_MII4_RX_CLK	= 0x09,
	CLKSRC_PLL0		= 0x0B,
	CLKSRC_PLL1		= 0x0E,
	CLKSRC_IDIV0		= 0x11,
	CLKSRC_IDIV1		= 0x12,
	CLKSRC_IDIV2		= 0x13,
	CLKSRC_IDIV3		= 0x14,
	CLKSRC_IDIV4		= 0x15,
};
/* UM10944 Table 83.
 * MIIx clock control registers 1 to 30
 * (addresses 100013h to 100035h)
 */
struct sja1105_cgu_mii_ctrl {
	u64 clksrc;	/* Clock source (one of the CLKSRC_* encodings) */
	u64 autoblock;	/* Block the clock automatically while changing clksrc */
	u64 pd;		/* Power down */
};
90 static void sja1105_cgu_idiv_packing(void *buf, struct sja1105_cgu_idiv *idiv,
91 enum packing_op op)
93 const int size = 4;
95 sja1105_packing(buf, &idiv->clksrc, 28, 24, size, op);
96 sja1105_packing(buf, &idiv->autoblock, 11, 11, size, op);
97 sja1105_packing(buf, &idiv->idiv, 5, 2, size, op);
98 sja1105_packing(buf, &idiv->pd, 0, 0, size, op);
101 static int sja1105_cgu_idiv_config(struct sja1105_private *priv, int port,
102 bool enabled, int factor)
104 const struct sja1105_regs *regs = priv->info->regs;
105 struct device *dev = priv->ds->dev;
106 struct sja1105_cgu_idiv idiv;
107 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
109 if (enabled && factor != 1 && factor != 10) {
110 dev_err(dev, "idiv factor must be 1 or 10\n");
111 return -ERANGE;
114 /* Payload for packed_buf */
115 idiv.clksrc = 0x0A; /* 25MHz */
116 idiv.autoblock = 1; /* Block clk automatically */
117 idiv.idiv = factor - 1; /* Divide by 1 or 10 */
118 idiv.pd = enabled ? 0 : 1; /* Power down? */
119 sja1105_cgu_idiv_packing(packed_buf, &idiv, PACK);
121 return sja1105_xfer_buf(priv, SPI_WRITE, regs->cgu_idiv[port],
122 packed_buf, SJA1105_SIZE_CGU_CMD);
125 static void
126 sja1105_cgu_mii_control_packing(void *buf, struct sja1105_cgu_mii_ctrl *cmd,
127 enum packing_op op)
129 const int size = 4;
131 sja1105_packing(buf, &cmd->clksrc, 28, 24, size, op);
132 sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
133 sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
136 static int sja1105_cgu_mii_tx_clk_config(struct sja1105_private *priv,
137 int port, sja1105_mii_role_t role)
139 const struct sja1105_regs *regs = priv->info->regs;
140 struct sja1105_cgu_mii_ctrl mii_tx_clk;
141 const int mac_clk_sources[] = {
142 CLKSRC_MII0_TX_CLK,
143 CLKSRC_MII1_TX_CLK,
144 CLKSRC_MII2_TX_CLK,
145 CLKSRC_MII3_TX_CLK,
146 CLKSRC_MII4_TX_CLK,
148 const int phy_clk_sources[] = {
149 CLKSRC_IDIV0,
150 CLKSRC_IDIV1,
151 CLKSRC_IDIV2,
152 CLKSRC_IDIV3,
153 CLKSRC_IDIV4,
155 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
156 int clksrc;
158 if (role == XMII_MAC)
159 clksrc = mac_clk_sources[port];
160 else
161 clksrc = phy_clk_sources[port];
163 /* Payload for packed_buf */
164 mii_tx_clk.clksrc = clksrc;
165 mii_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
166 mii_tx_clk.pd = 0; /* Power Down off => enabled */
167 sja1105_cgu_mii_control_packing(packed_buf, &mii_tx_clk, PACK);
169 return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_tx_clk[port],
170 packed_buf, SJA1105_SIZE_CGU_CMD);
173 static int
174 sja1105_cgu_mii_rx_clk_config(struct sja1105_private *priv, int port)
176 const struct sja1105_regs *regs = priv->info->regs;
177 struct sja1105_cgu_mii_ctrl mii_rx_clk;
178 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
179 const int clk_sources[] = {
180 CLKSRC_MII0_RX_CLK,
181 CLKSRC_MII1_RX_CLK,
182 CLKSRC_MII2_RX_CLK,
183 CLKSRC_MII3_RX_CLK,
184 CLKSRC_MII4_RX_CLK,
187 /* Payload for packed_buf */
188 mii_rx_clk.clksrc = clk_sources[port];
189 mii_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
190 mii_rx_clk.pd = 0; /* Power Down off => enabled */
191 sja1105_cgu_mii_control_packing(packed_buf, &mii_rx_clk, PACK);
193 return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_rx_clk[port],
194 packed_buf, SJA1105_SIZE_CGU_CMD);
197 static int
198 sja1105_cgu_mii_ext_tx_clk_config(struct sja1105_private *priv, int port)
200 const struct sja1105_regs *regs = priv->info->regs;
201 struct sja1105_cgu_mii_ctrl mii_ext_tx_clk;
202 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
203 const int clk_sources[] = {
204 CLKSRC_IDIV0,
205 CLKSRC_IDIV1,
206 CLKSRC_IDIV2,
207 CLKSRC_IDIV3,
208 CLKSRC_IDIV4,
211 /* Payload for packed_buf */
212 mii_ext_tx_clk.clksrc = clk_sources[port];
213 mii_ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
214 mii_ext_tx_clk.pd = 0; /* Power Down off => enabled */
215 sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_tx_clk, PACK);
217 return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_tx_clk[port],
218 packed_buf, SJA1105_SIZE_CGU_CMD);
221 static int
222 sja1105_cgu_mii_ext_rx_clk_config(struct sja1105_private *priv, int port)
224 const struct sja1105_regs *regs = priv->info->regs;
225 struct sja1105_cgu_mii_ctrl mii_ext_rx_clk;
226 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
227 const int clk_sources[] = {
228 CLKSRC_IDIV0,
229 CLKSRC_IDIV1,
230 CLKSRC_IDIV2,
231 CLKSRC_IDIV3,
232 CLKSRC_IDIV4,
235 /* Payload for packed_buf */
236 mii_ext_rx_clk.clksrc = clk_sources[port];
237 mii_ext_rx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
238 mii_ext_rx_clk.pd = 0; /* Power Down off => enabled */
239 sja1105_cgu_mii_control_packing(packed_buf, &mii_ext_rx_clk, PACK);
241 return sja1105_xfer_buf(priv, SPI_WRITE, regs->mii_ext_rx_clk[port],
242 packed_buf, SJA1105_SIZE_CGU_CMD);
245 static int sja1105_mii_clocking_setup(struct sja1105_private *priv, int port,
246 sja1105_mii_role_t role)
248 struct device *dev = priv->ds->dev;
249 int rc;
251 dev_dbg(dev, "Configuring MII-%s clocking\n",
252 (role == XMII_MAC) ? "MAC" : "PHY");
253 /* If role is MAC, disable IDIV
254 * If role is PHY, enable IDIV and configure for 1/1 divider
256 rc = sja1105_cgu_idiv_config(priv, port, (role == XMII_PHY), 1);
257 if (rc < 0)
258 return rc;
260 /* Configure CLKSRC of MII_TX_CLK_n
261 * * If role is MAC, select TX_CLK_n
262 * * If role is PHY, select IDIV_n
264 rc = sja1105_cgu_mii_tx_clk_config(priv, port, role);
265 if (rc < 0)
266 return rc;
268 /* Configure CLKSRC of MII_RX_CLK_n
269 * Select RX_CLK_n
271 rc = sja1105_cgu_mii_rx_clk_config(priv, port);
272 if (rc < 0)
273 return rc;
275 if (role == XMII_PHY) {
276 /* Per MII spec, the PHY (which is us) drives the TX_CLK pin */
278 /* Configure CLKSRC of EXT_TX_CLK_n
279 * Select IDIV_n
281 rc = sja1105_cgu_mii_ext_tx_clk_config(priv, port);
282 if (rc < 0)
283 return rc;
285 /* Configure CLKSRC of EXT_RX_CLK_n
286 * Select IDIV_n
288 rc = sja1105_cgu_mii_ext_rx_clk_config(priv, port);
289 if (rc < 0)
290 return rc;
292 return 0;
295 static void
296 sja1105_cgu_pll_control_packing(void *buf, struct sja1105_cgu_pll_ctrl *cmd,
297 enum packing_op op)
299 const int size = 4;
301 sja1105_packing(buf, &cmd->pllclksrc, 28, 24, size, op);
302 sja1105_packing(buf, &cmd->msel, 23, 16, size, op);
303 sja1105_packing(buf, &cmd->autoblock, 11, 11, size, op);
304 sja1105_packing(buf, &cmd->psel, 9, 8, size, op);
305 sja1105_packing(buf, &cmd->direct, 7, 7, size, op);
306 sja1105_packing(buf, &cmd->fbsel, 6, 6, size, op);
307 sja1105_packing(buf, &cmd->bypass, 1, 1, size, op);
308 sja1105_packing(buf, &cmd->pd, 0, 0, size, op);
311 static int sja1105_cgu_rgmii_tx_clk_config(struct sja1105_private *priv,
312 int port, sja1105_speed_t speed)
314 const struct sja1105_regs *regs = priv->info->regs;
315 struct sja1105_cgu_mii_ctrl txc;
316 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
317 int clksrc;
319 if (speed == SJA1105_SPEED_1000MBPS) {
320 clksrc = CLKSRC_PLL0;
321 } else {
322 int clk_sources[] = {CLKSRC_IDIV0, CLKSRC_IDIV1, CLKSRC_IDIV2,
323 CLKSRC_IDIV3, CLKSRC_IDIV4};
324 clksrc = clk_sources[port];
327 /* RGMII: 125MHz for 1000, 25MHz for 100, 2.5MHz for 10 */
328 txc.clksrc = clksrc;
329 /* Autoblock clk while changing clksrc */
330 txc.autoblock = 1;
331 /* Power Down off => enabled */
332 txc.pd = 0;
333 sja1105_cgu_mii_control_packing(packed_buf, &txc, PACK);
335 return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgmii_tx_clk[port],
336 packed_buf, SJA1105_SIZE_CGU_CMD);
339 /* AGU */
340 static void
341 sja1105_cfg_pad_mii_tx_packing(void *buf, struct sja1105_cfg_pad_mii_tx *cmd,
342 enum packing_op op)
344 const int size = 4;
346 sja1105_packing(buf, &cmd->d32_os, 28, 27, size, op);
347 sja1105_packing(buf, &cmd->d32_ipud, 25, 24, size, op);
348 sja1105_packing(buf, &cmd->d10_os, 20, 19, size, op);
349 sja1105_packing(buf, &cmd->d10_ipud, 17, 16, size, op);
350 sja1105_packing(buf, &cmd->ctrl_os, 12, 11, size, op);
351 sja1105_packing(buf, &cmd->ctrl_ipud, 9, 8, size, op);
352 sja1105_packing(buf, &cmd->clk_os, 4, 3, size, op);
353 sja1105_packing(buf, &cmd->clk_ih, 2, 2, size, op);
354 sja1105_packing(buf, &cmd->clk_ipud, 1, 0, size, op);
357 static int sja1105_rgmii_cfg_pad_tx_config(struct sja1105_private *priv,
358 int port)
360 const struct sja1105_regs *regs = priv->info->regs;
361 struct sja1105_cfg_pad_mii_tx pad_mii_tx;
362 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
364 /* Payload */
365 pad_mii_tx.d32_os = 3; /* TXD[3:2] output stage: */
366 /* high noise/high speed */
367 pad_mii_tx.d10_os = 3; /* TXD[1:0] output stage: */
368 /* high noise/high speed */
369 pad_mii_tx.d32_ipud = 2; /* TXD[3:2] input stage: */
370 /* plain input (default) */
371 pad_mii_tx.d10_ipud = 2; /* TXD[1:0] input stage: */
372 /* plain input (default) */
373 pad_mii_tx.ctrl_os = 3; /* TX_CTL / TX_ER output stage */
374 pad_mii_tx.ctrl_ipud = 2; /* TX_CTL / TX_ER input stage (default) */
375 pad_mii_tx.clk_os = 3; /* TX_CLK output stage */
376 pad_mii_tx.clk_ih = 0; /* TX_CLK input hysteresis (default) */
377 pad_mii_tx.clk_ipud = 2; /* TX_CLK input stage (default) */
378 sja1105_cfg_pad_mii_tx_packing(packed_buf, &pad_mii_tx, PACK);
380 return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_tx[port],
381 packed_buf, SJA1105_SIZE_CGU_CMD);
384 static void
385 sja1105_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
386 enum packing_op op)
388 const int size = SJA1105_SIZE_CGU_CMD;
390 sja1105_packing(buf, &cmd->rxc_stable_ovr, 15, 15, size, op);
391 sja1105_packing(buf, &cmd->rxc_delay, 14, 10, size, op);
392 sja1105_packing(buf, &cmd->rxc_bypass, 9, 9, size, op);
393 sja1105_packing(buf, &cmd->rxc_pd, 8, 8, size, op);
394 sja1105_packing(buf, &cmd->txc_stable_ovr, 7, 7, size, op);
395 sja1105_packing(buf, &cmd->txc_delay, 6, 2, size, op);
396 sja1105_packing(buf, &cmd->txc_bypass, 1, 1, size, op);
397 sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
400 /* Valid range in degrees is an integer between 73.8 and 101.7 */
401 static u64 sja1105_rgmii_delay(u64 phase)
403 /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
404 * To avoid floating point operations we'll multiply by 10
405 * and get 1 decimal point precision.
407 phase *= 10;
408 return (phase - 738) / 9;
/* The RGMII delay setup procedure is 2-step and gets called upon each
 * .phylink_mac_config. Both are strategic.
 * The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
 * with recovering from a frequency change of the link partner's RGMII clock.
 * The easiest way to recover from this is to temporarily power down the TDL,
 * as it will re-lock at the new frequency afterwards.
 */
int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
{
	const struct sja1105_private *priv = ctx;
	const struct sja1105_regs *regs = priv->info->regs;
	struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
	u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
	int rc;

	/* 90 degrees is the only delay applied here;
	 * sja1105_rgmii_delay() converts it into the delay-line tuning value.
	 */
	if (priv->rgmii_rx_delay[port])
		pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
	if (priv->rgmii_tx_delay[port])
		pad_mii_id.txc_delay = sja1105_rgmii_delay(90);

	/* Stage 1: Turn the RGMII delay lines off. */
	pad_mii_id.rxc_bypass = 1;
	pad_mii_id.rxc_pd = 1;
	pad_mii_id.txc_bypass = 1;
	pad_mii_id.txc_pd = 1;
	sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

	rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
			      packed_buf, SJA1105_SIZE_CGU_CMD);
	if (rc < 0)
		return rc;

	/* Stage 2: Turn the RGMII delay lines on (only the requested
	 * directions; the other direction stays bypassed/powered down).
	 */
	if (priv->rgmii_rx_delay[port]) {
		pad_mii_id.rxc_bypass = 0;
		pad_mii_id.rxc_pd = 0;
	}
	if (priv->rgmii_tx_delay[port]) {
		pad_mii_id.txc_bypass = 0;
		pad_mii_id.txc_pd = 0;
	}
	sja1105_cfg_pad_mii_id_packing(packed_buf, &pad_mii_id, PACK);

	return sja1105_xfer_buf(priv, SPI_WRITE, regs->pad_mii_id[port],
				packed_buf, SJA1105_SIZE_CGU_CMD);
}
/* Full RGMII clocking bring-up for @port: configure IDIV per the current
 * link speed, select the TX clock source, program the TX pad drive
 * strength, and (for P/Q/R/S in PHY role) apply the RGMII internal delays.
 */
static int sja1105_rgmii_clocking_setup(struct sja1105_private *priv, int port,
					sja1105_mii_role_t role)
{
	struct device *dev = priv->ds->dev;
	struct sja1105_mac_config_entry *mac;
	sja1105_speed_t speed;
	int rc;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	speed = mac[port].speed;

	/* NOTE(review): 'speed' is the sja1105_speed_t encoding; it may not
	 * be the literal Mbps figure the format string suggests — confirm
	 * against the enum definition.
	 */
	dev_dbg(dev, "Configuring port %d RGMII at speed %dMbps\n",
		port, speed);

	switch (speed) {
	case SJA1105_SPEED_1000MBPS:
		/* 1000Mbps, IDIV disabled (125 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, false, 1);
		break;
	case SJA1105_SPEED_100MBPS:
		/* 100Mbps, IDIV enabled, divide by 1 (25 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, true, 1);
		break;
	case SJA1105_SPEED_10MBPS:
		/* 10Mbps, IDIV enabled, divide by 10 (2.5 MHz) */
		rc = sja1105_cgu_idiv_config(priv, port, true, 10);
		break;
	case SJA1105_SPEED_AUTO:
		/* Skip CGU configuration if there is no speed available
		 * (e.g. link is not established yet)
		 */
		dev_dbg(dev, "Speed not available, skipping CGU config\n");
		return 0;
	default:
		rc = -EINVAL;
	}

	if (rc < 0) {
		dev_err(dev, "Failed to configure idiv\n");
		return rc;
	}
	rc = sja1105_cgu_rgmii_tx_clk_config(priv, port, speed);
	if (rc < 0) {
		dev_err(dev, "Failed to configure RGMII Tx clock\n");
		return rc;
	}
	rc = sja1105_rgmii_cfg_pad_tx_config(priv, port);
	if (rc < 0) {
		dev_err(dev, "Failed to configure Tx pad registers\n");
		return rc;
	}
	/* Not every switch revision provides a delay-setup hook */
	if (!priv->info->setup_rgmii_delay)
		return 0;
	/* The role has no hardware effect for RGMII. However we use it as
	 * a proxy for this interface being a MAC-to-MAC connection, with
	 * the RGMII internal delays needing to be applied by us.
	 */
	if (role == XMII_MAC)
		return 0;

	return priv->info->setup_rgmii_delay(priv, port);
}
521 static int sja1105_cgu_rmii_ref_clk_config(struct sja1105_private *priv,
522 int port)
524 const struct sja1105_regs *regs = priv->info->regs;
525 struct sja1105_cgu_mii_ctrl ref_clk;
526 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
527 const int clk_sources[] = {
528 CLKSRC_MII0_TX_CLK,
529 CLKSRC_MII1_TX_CLK,
530 CLKSRC_MII2_TX_CLK,
531 CLKSRC_MII3_TX_CLK,
532 CLKSRC_MII4_TX_CLK,
535 /* Payload for packed_buf */
536 ref_clk.clksrc = clk_sources[port];
537 ref_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
538 ref_clk.pd = 0; /* Power Down off => enabled */
539 sja1105_cgu_mii_control_packing(packed_buf, &ref_clk, PACK);
541 return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ref_clk[port],
542 packed_buf, SJA1105_SIZE_CGU_CMD);
545 static int
546 sja1105_cgu_rmii_ext_tx_clk_config(struct sja1105_private *priv, int port)
548 const struct sja1105_regs *regs = priv->info->regs;
549 struct sja1105_cgu_mii_ctrl ext_tx_clk;
550 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
552 /* Payload for packed_buf */
553 ext_tx_clk.clksrc = CLKSRC_PLL1;
554 ext_tx_clk.autoblock = 1; /* Autoblock clk while changing clksrc */
555 ext_tx_clk.pd = 0; /* Power Down off => enabled */
556 sja1105_cgu_mii_control_packing(packed_buf, &ext_tx_clk, PACK);
558 return sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_ext_tx_clk[port],
559 packed_buf, SJA1105_SIZE_CGU_CMD);
562 static int sja1105_cgu_rmii_pll_config(struct sja1105_private *priv)
564 const struct sja1105_regs *regs = priv->info->regs;
565 u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
566 struct sja1105_cgu_pll_ctrl pll = {0};
567 struct device *dev = priv->ds->dev;
568 int rc;
570 /* PLL1 must be enabled and output 50 Mhz.
571 * This is done by writing first 0x0A010941 to
572 * the PLL_1_C register and then deasserting
573 * power down (PD) 0x0A010940.
576 /* Step 1: PLL1 setup for 50Mhz */
577 pll.pllclksrc = 0xA;
578 pll.msel = 0x1;
579 pll.autoblock = 0x1;
580 pll.psel = 0x1;
581 pll.direct = 0x0;
582 pll.fbsel = 0x1;
583 pll.bypass = 0x0;
584 pll.pd = 0x1;
586 sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
587 rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
588 SJA1105_SIZE_CGU_CMD);
589 if (rc < 0) {
590 dev_err(dev, "failed to configure PLL1 for 50MHz\n");
591 return rc;
594 /* Step 2: Enable PLL1 */
595 pll.pd = 0x0;
597 sja1105_cgu_pll_control_packing(packed_buf, &pll, PACK);
598 rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->rmii_pll1, packed_buf,
599 SJA1105_SIZE_CGU_CMD);
600 if (rc < 0) {
601 dev_err(dev, "failed to enable PLL1\n");
602 return rc;
604 return rc;
607 static int sja1105_rmii_clocking_setup(struct sja1105_private *priv, int port,
608 sja1105_mii_role_t role)
610 struct device *dev = priv->ds->dev;
611 int rc;
613 dev_dbg(dev, "Configuring RMII-%s clocking\n",
614 (role == XMII_MAC) ? "MAC" : "PHY");
615 /* AH1601.pdf chapter 2.5.1. Sources */
616 if (role == XMII_MAC) {
617 /* Configure and enable PLL1 for 50Mhz output */
618 rc = sja1105_cgu_rmii_pll_config(priv);
619 if (rc < 0)
620 return rc;
622 /* Disable IDIV for this port */
623 rc = sja1105_cgu_idiv_config(priv, port, false, 1);
624 if (rc < 0)
625 return rc;
626 /* Source to sink mappings */
627 rc = sja1105_cgu_rmii_ref_clk_config(priv, port);
628 if (rc < 0)
629 return rc;
630 if (role == XMII_MAC) {
631 rc = sja1105_cgu_rmii_ext_tx_clk_config(priv, port);
632 if (rc < 0)
633 return rc;
635 return 0;
638 int sja1105_clocking_setup_port(struct sja1105_private *priv, int port)
640 struct sja1105_xmii_params_entry *mii;
641 struct device *dev = priv->ds->dev;
642 sja1105_phy_interface_t phy_mode;
643 sja1105_mii_role_t role;
644 int rc;
646 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
648 /* RGMII etc */
649 phy_mode = mii->xmii_mode[port];
650 /* MAC or PHY, for applicable types (not RGMII) */
651 role = mii->phy_mac[port];
653 switch (phy_mode) {
654 case XMII_MODE_MII:
655 rc = sja1105_mii_clocking_setup(priv, port, role);
656 break;
657 case XMII_MODE_RMII:
658 rc = sja1105_rmii_clocking_setup(priv, port, role);
659 break;
660 case XMII_MODE_RGMII:
661 rc = sja1105_rgmii_clocking_setup(priv, port, role);
662 break;
663 default:
664 dev_err(dev, "Invalid interface mode specified: %d\n",
665 phy_mode);
666 return -EINVAL;
668 if (rc)
669 dev_err(dev, "Clocking setup for port %d failed: %d\n",
670 port, rc);
671 return rc;
674 int sja1105_clocking_setup(struct sja1105_private *priv)
676 int port, rc;
678 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
679 rc = sja1105_clocking_setup_port(priv, port);
680 if (rc < 0)
681 return rc;
683 return 0;