// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Combo-PHY driver
 *
 * Copyright (C) 2019-2020 Intel Corporation.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include <dt-bindings/phy/phy.h>

#define PCIE_PHY_GEN_CTRL	0x00
#define PCIE_PHY_CLK_PAD	BIT(17)

#define PAD_DIS_CFG		0x174

#define PCS_XF_ATE_OVRD_IN_2	0x3008
#define ADAPT_REQ_MSK		GENMASK(5, 4)

#define PCS_XF_RX_ADAPT_ACK	0x3010
#define RX_ADAPT_ACK_BIT	BIT(0)

#define CR_ADDR(addr, lane)	(((addr) + (lane) * 0x100) << 2)
#define REG_COMBO_MODE(x)	((x) * 0x200)
#define REG_CLK_DISABLE(x)	((x) * 0x200 + 0x124)

#define COMBO_PHY_ID(x)		((x)->parent->id)
#define PHY_ID(x)		((x)->id)

#define CLK_100MHZ		100000000
#define CLK_156_25MHZ		156250000
static const unsigned long intel_iphy_clk_rates[] = {
	CLK_100MHZ, CLK_156_25MHZ, CLK_100MHZ,
};

enum {
	PHY_0,
	PHY_1,
	PHY_MAX_NUM
};

/*
 * Clock Register bit fields to enable clocks
 * for ComboPhy according to the mode.
 */
enum intel_phy_mode {
	PHY_PCIE_MODE = 0,
	PHY_XPCS_MODE,
	PHY_SATA_MODE,
};

/* ComboPhy mode Register values */
enum intel_combo_mode {
	PCIE0_PCIE1_MODE = 0,
	PCIE_DL_MODE,
	RXAUI_MODE,
	XPCS0_XPCS1_MODE,
	SATA0_SATA1_MODE,
};

enum aggregated_mode {
	PHY_SL_MODE,
	PHY_DL_MODE,
};

struct intel_combo_phy;

struct intel_cbphy_iphy {
	struct phy		*phy;
	struct intel_combo_phy	*parent;
	struct reset_control	*app_rst;
	u32			id;
};

struct intel_combo_phy {
	struct device		*dev;
	struct clk		*core_clk;
	unsigned long		clk_rate;
	void __iomem		*app_base;
	void __iomem		*cr_base;
	struct regmap		*syscfg;
	struct regmap		*hsiocfg;
	u32			id;
	u32			bid;
	struct reset_control	*phy_rst;
	struct reset_control	*core_rst;
	struct intel_cbphy_iphy	iphy[PHY_MAX_NUM];
	enum intel_phy_mode	phy_mode;
	enum aggregated_mode	aggr_mode;
	u32			init_cnt;
	struct mutex		lock;
};

static int intel_cbphy_iphy_enable(struct intel_cbphy_iphy *iphy, bool set)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	u32 mask = BIT(cbphy->phy_mode * 2 + iphy->id);
	u32 val;

	/* Register: 0 is enable, 1 is disable */
	val = set ? 0 : mask;

	return regmap_update_bits(cbphy->hsiocfg, REG_CLK_DISABLE(cbphy->bid),
				  mask, val);
}

static int intel_cbphy_pcie_refclk_cfg(struct intel_cbphy_iphy *iphy, bool set)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	u32 mask = BIT(cbphy->id * 2 + iphy->id);
	u32 val;

	/* Register: 0 is enable, 1 is disable */
	val = set ? 0 : mask;

	return regmap_update_bits(cbphy->syscfg, PAD_DIS_CFG, mask, val);
}

static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
					  u32 mask, u32 val)
{
	u32 reg_val;

	/* Read-modify-write: clear the masked field, then set the new value */
	reg_val = readl(base + reg);
	reg_val &= ~mask;
	reg_val |= val;
	writel(reg_val, base + reg);
}
static int intel_cbphy_iphy_cfg(struct intel_cbphy_iphy *iphy,
				int (*phy_cfg)(struct intel_cbphy_iphy *))
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = phy_cfg(iphy);
	if (ret)
		return ret;

	if (cbphy->aggr_mode != PHY_DL_MODE)
		return 0;

	return phy_cfg(&cbphy->iphy[PHY_1]);
}

static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = intel_cbphy_pcie_refclk_cfg(iphy, true);
	if (ret) {
		dev_err(cbphy->dev, "Failed to enable PCIe pad refclk\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
			       PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));

	/* Delay for stable clock PLL */
	usleep_range(50, 100);

	return 0;
}

static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = intel_cbphy_pcie_refclk_cfg(iphy, false);
	if (ret) {
		dev_err(cbphy->dev, "Failed to disable PCIe pad refclk\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
			       PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));

	return 0;
}

static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
{
	enum intel_combo_mode cb_mode;
	enum aggregated_mode aggr = cbphy->aggr_mode;
	struct device *dev = cbphy->dev;
	enum intel_phy_mode mode;
	int ret;

	mode = cbphy->phy_mode;

	switch (mode) {
	case PHY_PCIE_MODE:
		cb_mode = (aggr == PHY_DL_MODE) ? PCIE_DL_MODE : PCIE0_PCIE1_MODE;
		break;

	case PHY_XPCS_MODE:
		cb_mode = (aggr == PHY_DL_MODE) ? RXAUI_MODE : XPCS0_XPCS1_MODE;
		break;

	case PHY_SATA_MODE:
		if (aggr == PHY_DL_MODE) {
			dev_err(dev, "Mode:%u does not support dual lane!\n", mode);
			return -EINVAL;
		}

		cb_mode = SATA0_SATA1_MODE;
		break;
	default:
		return -EINVAL;
	}

	ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
	if (ret)
		dev_err(dev, "Failed to set ComboPhy mode: %d\n", ret);

	return ret;
}

static void intel_cbphy_rst_assert(struct intel_combo_phy *cbphy)
{
	reset_control_assert(cbphy->core_rst);
	reset_control_assert(cbphy->phy_rst);
}

static void intel_cbphy_rst_deassert(struct intel_combo_phy *cbphy)
{
	reset_control_deassert(cbphy->core_rst);
	reset_control_deassert(cbphy->phy_rst);
	/* Delay to ensure reset process is done */
	usleep_range(10, 20);
}

static int intel_cbphy_iphy_power_on(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	if (!cbphy->init_cnt) {
		ret = clk_prepare_enable(cbphy->core_clk);
		if (ret) {
			dev_err(cbphy->dev, "Clock enable failed!\n");
			return ret;
		}

		ret = clk_set_rate(cbphy->core_clk, cbphy->clk_rate);
		if (ret) {
			dev_err(cbphy->dev, "Clock freq set to %lu failed!\n",
				cbphy->clk_rate);
			goto clk_err;
		}

		intel_cbphy_rst_assert(cbphy);
		intel_cbphy_rst_deassert(cbphy);
		ret = intel_cbphy_set_mode(cbphy);
		if (ret)
			goto clk_err;
	}

	ret = intel_cbphy_iphy_enable(iphy, true);
	if (ret) {
		dev_err(cbphy->dev, "Failed enabling PHY core\n");
		goto clk_err;
	}

	ret = reset_control_deassert(iphy->app_rst);
	if (ret) {
		dev_err(cbphy->dev, "PHY(%u:%u) reset deassert failed!\n",
			COMBO_PHY_ID(iphy), PHY_ID(iphy));
		goto clk_err;
	}

	/* Delay to ensure reset process is done */
	udelay(1);

	return 0;

clk_err:
	clk_disable_unprepare(cbphy->core_clk);

	return ret;
}

static int intel_cbphy_iphy_power_off(struct intel_cbphy_iphy *iphy)
{
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	ret = reset_control_assert(iphy->app_rst);
	if (ret) {
		dev_err(cbphy->dev, "PHY(%u:%u) reset assert failed!\n",
			COMBO_PHY_ID(iphy), PHY_ID(iphy));
		return ret;
	}

	ret = intel_cbphy_iphy_enable(iphy, false);
	if (ret) {
		dev_err(cbphy->dev, "Failed disabling PHY core\n");
		return ret;
	}

	if (cbphy->init_cnt)
		return 0;

	clk_disable_unprepare(cbphy->core_clk);
	intel_cbphy_rst_assert(cbphy);

	return 0;
}
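
/*
 * init_cnt reference-counts active consumers under cbphy->lock: the first
 * phy_init() brings up the shared clock, resets and combo mode, and the
 * last phy_exit() tears them down again.
 */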
static int intel_cbphy_init(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	mutex_lock(&cbphy->lock);
	ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_on);
	if (ret)
		goto err;

	if (cbphy->phy_mode == PHY_PCIE_MODE) {
		ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_en_pad_refclk);
		if (ret)
			goto err;
	}

	cbphy->init_cnt++;

err:
	mutex_unlock(&cbphy->lock);

	return ret;
}

static int intel_cbphy_exit(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	int ret;

	mutex_lock(&cbphy->lock);
	cbphy->init_cnt--;
	if (cbphy->phy_mode == PHY_PCIE_MODE) {
		ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_pcie_dis_pad_refclk);
		if (ret)
			goto err;
	}

	ret = intel_cbphy_iphy_cfg(iphy, intel_cbphy_iphy_power_off);

err:
	mutex_unlock(&cbphy->lock);

	return ret;
}
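
/*
 * RX adaptation sequence for XPCS mode: request adaptation through the
 * ADAPT_REQ field, poll for the acknowledge bit, then clear the request
 * again. A no-op in the other PHY modes.
 */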
static int intel_cbphy_calibrate(struct phy *phy)
{
	struct intel_cbphy_iphy *iphy = phy_get_drvdata(phy);
	struct intel_combo_phy *cbphy = iphy->parent;
	void __iomem *cr_base = cbphy->cr_base;
	int val, ret, id;

	if (cbphy->phy_mode != PHY_XPCS_MODE)
		return 0;

	id = PHY_ID(iphy);

	/* trigger auto RX adaptation */
	combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
			       ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
	/* Wait RX adaptation to finish */
	ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
				 val, val & RX_ADAPT_ACK_BIT, 10, 5000);
	if (ret)
		dev_err(cbphy->dev, "RX Adaptation failed!\n");
	else
		dev_dbg(cbphy->dev, "RX Adaptation success!\n");

	/* Stop RX adaptation */
	combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
			       ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));

	return ret;
}
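
/*
 * For illustration, a devicetree node wired up the way the parser below
 * expects might look roughly like this (a sketch only; phandle labels and
 * addresses are made up, and the authoritative property list is the
 * platform's combo-phy devicetree binding document):
 *
 *	combophy@d0a00000 {
 *		compatible = "intel,combophy-lgm";
 *		reg = <0xd0a00000 0x40000>, <0xd0a40000 0x1000>;
 *		reg-names = "core", "app";
 *		clocks = <&cgu 1>;
 *		resets = <&rcu 0>, <&rcu 1>, <&rcu 2>, <&rcu 3>;
 *		reset-names = "phy", "core", "iphy0", "iphy1";
 *		intel,syscfg = <&sysconf 0>;
 *		intel,hsio = <&hsio 0>;
 *		intel,phy-mode = <PHY_TYPE_PCIE>;
 *		#phy-cells = <1>;
 *	};
 */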
static int intel_cbphy_fwnode_parse(struct intel_combo_phy *cbphy)
{
	struct device *dev = cbphy->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct fwnode_reference_args ref;
	int ret;
	u32 val;

	cbphy->core_clk = devm_clk_get(dev, NULL);
	if (IS_ERR(cbphy->core_clk)) {
		ret = PTR_ERR(cbphy->core_clk);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get clk failed:%d!\n", ret);
		return ret;
	}

	cbphy->core_rst = devm_reset_control_get_optional(dev, "core");
	if (IS_ERR(cbphy->core_rst)) {
		ret = PTR_ERR(cbphy->core_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get core reset control err: %d!\n", ret);
		return ret;
	}

	cbphy->phy_rst = devm_reset_control_get_optional(dev, "phy");
	if (IS_ERR(cbphy->phy_rst)) {
		ret = PTR_ERR(cbphy->phy_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get PHY reset control err: %d!\n", ret);
		return ret;
	}

	cbphy->iphy[0].app_rst = devm_reset_control_get_optional(dev, "iphy0");
	if (IS_ERR(cbphy->iphy[0].app_rst)) {
		ret = PTR_ERR(cbphy->iphy[0].app_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get phy0 reset control err: %d!\n", ret);
		return ret;
	}

	cbphy->iphy[1].app_rst = devm_reset_control_get_optional(dev, "iphy1");
	if (IS_ERR(cbphy->iphy[1].app_rst)) {
		ret = PTR_ERR(cbphy->iphy[1].app_rst);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Get phy1 reset control err: %d!\n", ret);
		return ret;
	}

	cbphy->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
	if (IS_ERR(cbphy->app_base))
		return PTR_ERR(cbphy->app_base);

	cbphy->cr_base = devm_platform_ioremap_resource_byname(pdev, "core");
	if (IS_ERR(cbphy->cr_base))
		return PTR_ERR(cbphy->cr_base);

	/*
	 * syscfg and hsiocfg store handles to the register sets of which the
	 * ComboPhy subsystem specific registers are a subset; they are
	 * accessed through the regmap framework.
	 */
	ret = fwnode_property_get_reference_args(fwnode, "intel,syscfg", NULL,
						 1, 0, &ref);
	if (ret < 0)
		return ret;

	cbphy->id = ref.args[0];
	cbphy->syscfg = device_node_to_regmap(to_of_node(ref.fwnode));
	fwnode_handle_put(ref.fwnode);

	ret = fwnode_property_get_reference_args(fwnode, "intel,hsio", NULL, 1,
						 0, &ref);
	if (ret < 0)
		return ret;

	cbphy->bid = ref.args[0];
	cbphy->hsiocfg = device_node_to_regmap(to_of_node(ref.fwnode));
	fwnode_handle_put(ref.fwnode);

	ret = fwnode_property_read_u32_array(fwnode, "intel,phy-mode", &val, 1);
	if (ret)
		return ret;

	switch (val) {
	case PHY_TYPE_PCIE:
		cbphy->phy_mode = PHY_PCIE_MODE;
		break;

	case PHY_TYPE_SATA:
		cbphy->phy_mode = PHY_SATA_MODE;
		break;

	case PHY_TYPE_XPCS:
		cbphy->phy_mode = PHY_XPCS_MODE;
		break;

	default:
		dev_err(dev, "Invalid PHY mode: %u\n", val);
		return -EINVAL;
	}

	cbphy->clk_rate = intel_iphy_clk_rates[cbphy->phy_mode];

	if (fwnode_property_present(fwnode, "intel,aggregation"))
		cbphy->aggr_mode = PHY_DL_MODE;
	else
		cbphy->aggr_mode = PHY_SL_MODE;

	return 0;
}

static const struct phy_ops intel_cbphy_ops = {
	.init		= intel_cbphy_init,
	.exit		= intel_cbphy_exit,
	.calibrate	= intel_cbphy_calibrate,
	.owner		= THIS_MODULE,
};
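
/*
 * Translate a consumer's phy specifier: the single cell selects the
 * internal PHY instance. In dual-lane mode only PHY_0 is handed out,
 * as PHY_1 then serves as the second lane of the aggregated PHY.
 */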
static struct phy *intel_cbphy_xlate(struct device *dev,
				     struct of_phandle_args *args)
{
	struct intel_combo_phy *cbphy = dev_get_drvdata(dev);
	u32 iphy_id;

	if (args->args_count < 1) {
		dev_err(dev, "Invalid number of arguments\n");
		return ERR_PTR(-EINVAL);
	}

	iphy_id = args->args[0];
	if (iphy_id >= PHY_MAX_NUM) {
		dev_err(dev, "Invalid phy instance %d\n", iphy_id);
		return ERR_PTR(-EINVAL);
	}

	if (cbphy->aggr_mode == PHY_DL_MODE && iphy_id == PHY_1) {
		dev_err(dev, "Invalid. ComboPhy is in Dual lane mode %d\n", iphy_id);
		return ERR_PTR(-EINVAL);
	}

	return cbphy->iphy[iphy_id].phy;
}

static int intel_cbphy_create(struct intel_combo_phy *cbphy)
{
	struct phy_provider *phy_provider;
	struct device *dev = cbphy->dev;
	struct intel_cbphy_iphy *iphy;
	int i;

	for (i = 0; i < PHY_MAX_NUM; i++) {
		iphy = &cbphy->iphy[i];
		iphy->parent = cbphy;
		iphy->id = i;

		/* In dual lane mode skip phy creation for the second phy */
		if (cbphy->aggr_mode == PHY_DL_MODE && iphy->id == PHY_1)
			continue;

		iphy->phy = devm_phy_create(dev, NULL, &intel_cbphy_ops);
		if (IS_ERR(iphy->phy)) {
			dev_err(dev, "PHY[%u:%u]: create PHY instance failed!\n",
				COMBO_PHY_ID(iphy), PHY_ID(iphy));

			return PTR_ERR(iphy->phy);
		}

		phy_set_drvdata(iphy->phy, iphy);
	}

	dev_set_drvdata(dev, cbphy);
	phy_provider = devm_of_phy_provider_register(dev, intel_cbphy_xlate);
	if (IS_ERR(phy_provider))
		dev_err(dev, "Register PHY provider failed!\n");

	return PTR_ERR_OR_ZERO(phy_provider);
}

static int intel_cbphy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct intel_combo_phy *cbphy;
	int ret;

	cbphy = devm_kzalloc(dev, sizeof(*cbphy), GFP_KERNEL);
	if (!cbphy)
		return -ENOMEM;

	cbphy->dev = dev;
	cbphy->init_cnt = 0;
	mutex_init(&cbphy->lock);
	ret = intel_cbphy_fwnode_parse(cbphy);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, cbphy);

	return intel_cbphy_create(cbphy);
}

static int intel_cbphy_remove(struct platform_device *pdev)
{
	struct intel_combo_phy *cbphy = platform_get_drvdata(pdev);

	intel_cbphy_rst_assert(cbphy);
	clk_disable_unprepare(cbphy->core_clk);
	return 0;
}

static const struct of_device_id of_intel_cbphy_match[] = {
	{ .compatible = "intel,combo-phy" },
	{ .compatible = "intel,combophy-lgm" },
	{}
};

static struct platform_driver intel_cbphy_driver = {
	.probe = intel_cbphy_probe,
	.remove = intel_cbphy_remove,
	.driver = {
		.name = "intel-combo-phy",
		.of_match_table = of_intel_cbphy_match,
	}
};

module_platform_driver(intel_cbphy_driver);

MODULE_DESCRIPTION("Intel Combo-phy driver");
MODULE_LICENSE("GPL v2");