// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"
/*
 * Issue a MediaTek SIP secure-monitor call for UFS control.
 * @cmd: sub-command id, @val: argument, @res: struct arm_smccc_res
 * receiving the firmware's return values.
 */
#define ufs_mtk_smc(cmd, val, res) \
	arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
		      cmd, val, 0, 0, 0, 0, 0, &(res))

/* Tell secure firmware that the reference clock is being (un)gated. */
#define ufs_mtk_ref_clk_notify(on, res) \
	ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

/* Drive the device reset line (rst_n) high or low via secure firmware. */
#define ufs_mtk_device_reset_ctrl(high, res) \
	ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
33 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba
*hba
, bool enable
)
39 UIC_ARG_MIB(VS_SAVEPOWERCONTROL
), &tmp
);
41 (1 << RX_SYMBOL_CLK_GATE_EN
) |
42 (1 << SYS_CLK_GATE_EN
) |
43 (1 << TX_CLK_GATE_EN
);
45 UIC_ARG_MIB(VS_SAVEPOWERCONTROL
), tmp
);
48 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE
), &tmp
);
49 tmp
= tmp
& ~(1 << TX_SYMBOL_CLK_REQ_FORCE
);
51 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE
), tmp
);
54 UIC_ARG_MIB(VS_SAVEPOWERCONTROL
), &tmp
);
55 tmp
= tmp
& ~((1 << RX_SYMBOL_CLK_GATE_EN
) |
56 (1 << SYS_CLK_GATE_EN
) |
57 (1 << TX_CLK_GATE_EN
));
59 UIC_ARG_MIB(VS_SAVEPOWERCONTROL
), tmp
);
62 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE
), &tmp
);
63 tmp
= tmp
| (1 << TX_SYMBOL_CLK_REQ_FORCE
);
65 UIC_ARG_MIB(VS_DEBUGCLOCKENABLE
), tmp
);
69 static int ufs_mtk_hce_enable_notify(struct ufs_hba
*hba
,
70 enum ufs_notify_change_status status
)
72 struct ufs_mtk_host
*host
= ufshcd_get_variant(hba
);
74 if (status
== PRE_CHANGE
) {
76 hba
->hba_enable_delay_us
= 0;
78 hba
->hba_enable_delay_us
= 600;
84 static int ufs_mtk_bind_mphy(struct ufs_hba
*hba
)
86 struct ufs_mtk_host
*host
= ufshcd_get_variant(hba
);
87 struct device
*dev
= hba
->dev
;
88 struct device_node
*np
= dev
->of_node
;
91 host
->mphy
= devm_of_phy_get_by_index(dev
, np
, 0);
93 if (host
->mphy
== ERR_PTR(-EPROBE_DEFER
)) {
95 * UFS driver might be probed before the phy driver does.
96 * In that case we would like to return EPROBE_DEFER code.
100 "%s: required phy hasn't probed yet. err = %d\n",
102 } else if (IS_ERR(host
->mphy
)) {
103 err
= PTR_ERR(host
->mphy
);
104 dev_info(dev
, "%s: PHY get failed %d\n", __func__
, err
);
113 static int ufs_mtk_setup_ref_clk(struct ufs_hba
*hba
, bool on
)
115 struct ufs_mtk_host
*host
= ufshcd_get_variant(hba
);
116 struct arm_smccc_res res
;
117 unsigned long timeout
;
120 if (host
->ref_clk_enabled
== on
)
124 ufs_mtk_ref_clk_notify(on
, res
);
125 ufshcd_delay_us(host
->ref_clk_ungating_wait_us
, 10);
126 ufshcd_writel(hba
, REFCLK_REQUEST
, REG_UFS_REFCLK_CTRL
);
128 ufshcd_writel(hba
, REFCLK_RELEASE
, REG_UFS_REFCLK_CTRL
);
132 timeout
= jiffies
+ msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS
);
134 value
= ufshcd_readl(hba
, REG_UFS_REFCLK_CTRL
);
136 /* Wait until ack bit equals to req bit */
137 if (((value
& REFCLK_ACK
) >> 1) == (value
& REFCLK_REQUEST
))
140 usleep_range(100, 200);
141 } while (time_before(jiffies
, timeout
));
143 dev_err(hba
->dev
, "missing ack of refclk req, reg: 0x%x\n", value
);
145 ufs_mtk_ref_clk_notify(host
->ref_clk_enabled
, res
);
150 host
->ref_clk_enabled
= on
;
152 ufshcd_delay_us(host
->ref_clk_gating_wait_us
, 10);
153 ufs_mtk_ref_clk_notify(on
, res
);
159 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba
*hba
,
160 u16 gating_us
, u16 ungating_us
)
162 struct ufs_mtk_host
*host
= ufshcd_get_variant(hba
);
164 if (hba
->dev_info
.clk_gating_wait_us
) {
165 host
->ref_clk_gating_wait_us
=
166 hba
->dev_info
.clk_gating_wait_us
;
168 host
->ref_clk_gating_wait_us
= gating_us
;
171 host
->ref_clk_ungating_wait_us
= ungating_us
;
174 static u32
ufs_mtk_link_get_state(struct ufs_hba
*hba
)
178 ufshcd_writel(hba
, 0x20, REG_UFS_DEBUG_SEL
);
179 val
= ufshcd_readl(hba
, REG_UFS_PROBE
);
186 * ufs_mtk_setup_clocks - enables/disable clocks
187 * @hba: host controller instance
188 * @on: If true, enable clocks else disable them.
189 * @status: PRE_CHANGE or POST_CHANGE notify
191 * Returns 0 on success, non-zero on failure.
193 static int ufs_mtk_setup_clocks(struct ufs_hba
*hba
, bool on
,
194 enum ufs_notify_change_status status
)
196 struct ufs_mtk_host
*host
= ufshcd_get_variant(hba
);
200 * In case ufs_mtk_init() is not yet done, simply ignore.
201 * This ufs_mtk_setup_clocks() shall be called from
202 * ufs_mtk_init() after init is done.
207 if (!on
&& status
== PRE_CHANGE
) {
208 if (!ufshcd_is_link_active(hba
)) {
209 ufs_mtk_setup_ref_clk(hba
, on
);
210 ret
= phy_power_off(host
->mphy
);
213 * Gate ref-clk if link state is in Hibern8
214 * triggered by Auto-Hibern8.
216 if (!ufshcd_can_hibern8_during_gating(hba
) &&
217 ufshcd_is_auto_hibern8_enabled(hba
) &&
218 ufs_mtk_link_get_state(hba
) ==
220 ufs_mtk_setup_ref_clk(hba
, on
);
222 } else if (on
&& status
== POST_CHANGE
) {
223 ret
= phy_power_on(host
->mphy
);
224 ufs_mtk_setup_ref_clk(hba
, on
);
231 * ufs_mtk_init - find other essential mmio bases
232 * @hba: host controller instance
234 * Binds PHY with controller and powers up PHY enabling clocks
237 * Returns -EPROBE_DEFER if binding fails, returns negative error
238 * on phy power up failure and returns zero on success.
240 static int ufs_mtk_init(struct ufs_hba
*hba
)
242 struct ufs_mtk_host
*host
;
243 struct device
*dev
= hba
->dev
;
246 host
= devm_kzalloc(dev
, sizeof(*host
), GFP_KERNEL
);
249 dev_info(dev
, "%s: no memory for mtk ufs host\n", __func__
);
254 ufshcd_set_variant(hba
, host
);
256 err
= ufs_mtk_bind_mphy(hba
);
258 goto out_variant_clear
;
260 /* Enable runtime autosuspend */
261 hba
->caps
|= UFSHCD_CAP_RPM_AUTOSUSPEND
;
263 /* Enable clock-gating */
264 hba
->caps
|= UFSHCD_CAP_CLK_GATING
;
267 * ufshcd_vops_init() is invoked after
268 * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
269 * phy clock setup is skipped.
271 * Enable phy clocks specifically here.
273 ufs_mtk_setup_clocks(hba
, true, POST_CHANGE
);
278 ufshcd_set_variant(hba
, NULL
);
283 static int ufs_mtk_pre_pwr_change(struct ufs_hba
*hba
,
284 struct ufs_pa_layer_attr
*dev_max_params
,
285 struct ufs_pa_layer_attr
*dev_req_params
)
287 struct ufs_dev_params host_cap
;
290 host_cap
.tx_lanes
= UFS_MTK_LIMIT_NUM_LANES_TX
;
291 host_cap
.rx_lanes
= UFS_MTK_LIMIT_NUM_LANES_RX
;
292 host_cap
.hs_rx_gear
= UFS_MTK_LIMIT_HSGEAR_RX
;
293 host_cap
.hs_tx_gear
= UFS_MTK_LIMIT_HSGEAR_TX
;
294 host_cap
.pwm_rx_gear
= UFS_MTK_LIMIT_PWMGEAR_RX
;
295 host_cap
.pwm_tx_gear
= UFS_MTK_LIMIT_PWMGEAR_TX
;
296 host_cap
.rx_pwr_pwm
= UFS_MTK_LIMIT_RX_PWR_PWM
;
297 host_cap
.tx_pwr_pwm
= UFS_MTK_LIMIT_TX_PWR_PWM
;
298 host_cap
.rx_pwr_hs
= UFS_MTK_LIMIT_RX_PWR_HS
;
299 host_cap
.tx_pwr_hs
= UFS_MTK_LIMIT_TX_PWR_HS
;
300 host_cap
.hs_rate
= UFS_MTK_LIMIT_HS_RATE
;
301 host_cap
.desired_working_mode
=
302 UFS_MTK_LIMIT_DESIRED_MODE
;
304 ret
= ufshcd_get_pwr_dev_param(&host_cap
,
308 pr_info("%s: failed to determine capabilities\n",
315 static int ufs_mtk_pwr_change_notify(struct ufs_hba
*hba
,
316 enum ufs_notify_change_status stage
,
317 struct ufs_pa_layer_attr
*dev_max_params
,
318 struct ufs_pa_layer_attr
*dev_req_params
)
324 ret
= ufs_mtk_pre_pwr_change(hba
, dev_max_params
,
337 static int ufs_mtk_unipro_set_pm(struct ufs_hba
*hba
, u32 lpm
)
340 struct ufs_mtk_host
*host
= ufshcd_get_variant(hba
);
342 ret
= ufshcd_dme_set(hba
,
343 UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL
, 0),
346 host
->unipro_lpm
= lpm
;
351 static int ufs_mtk_pre_link(struct ufs_hba
*hba
)
356 ufs_mtk_unipro_set_pm(hba
, 0);
359 * Setting PA_Local_TX_LCC_Enable to 0 before link startup
360 * to make sure that both host and device TX LCC are disabled
361 * once link startup is completed.
363 ret
= ufshcd_disable_host_tx_lcc(hba
);
367 /* disable deep stall */
368 ret
= ufshcd_dme_get(hba
, UIC_ARG_MIB(VS_SAVEPOWERCONTROL
), &tmp
);
374 ret
= ufshcd_dme_set(hba
, UIC_ARG_MIB(VS_SAVEPOWERCONTROL
), tmp
);
379 static void ufs_mtk_setup_clk_gating(struct ufs_hba
*hba
)
384 if (ufshcd_is_clkgating_allowed(hba
)) {
385 if (ufshcd_is_auto_hibern8_supported(hba
) && hba
->ahit
)
386 ah_ms
= FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK
,
390 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
391 hba
->clk_gating
.delay_ms
= ah_ms
+ 5;
392 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
396 static int ufs_mtk_post_link(struct ufs_hba
*hba
)
398 /* enable unipro clock gating feature */
399 ufs_mtk_cfg_unipro_cg(hba
, true);
401 /* configure auto-hibern8 timer to 10ms */
402 if (ufshcd_is_auto_hibern8_supported(hba
)) {
403 ufshcd_auto_hibern8_update(hba
,
404 FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK
, 10) |
405 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK
, 3));
408 ufs_mtk_setup_clk_gating(hba
);
413 static int ufs_mtk_link_startup_notify(struct ufs_hba
*hba
,
414 enum ufs_notify_change_status stage
)
420 ret
= ufs_mtk_pre_link(hba
);
423 ret
= ufs_mtk_post_link(hba
);
433 static void ufs_mtk_device_reset(struct ufs_hba
*hba
)
435 struct arm_smccc_res res
;
437 ufs_mtk_device_reset_ctrl(0, res
);
440 * The reset signal is active low. UFS devices shall detect
441 * more than or equal to 1us of positive or negative RST_n
444 * To be on safe side, keep the reset low for at least 10us.
446 usleep_range(10, 15);
448 ufs_mtk_device_reset_ctrl(1, res
);
450 /* Some devices may need time to respond to rst_n */
451 usleep_range(10000, 15000);
453 dev_info(hba
->dev
, "device reset done\n");
/*
 * ufs_mtk_link_set_hpm - bring the link back to high-power mode
 * @hba: host controller instance
 *
 * Re-enables the host controller, leaves UniPro low-power mode, exits
 * hibern8 and makes the HBA operational again.  Returns 0 on success
 * or the first failing step's error code.
 */
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_hba_enable(hba);
	if (err)
		return err;

	err = ufs_mtk_unipro_set_pm(hba, 0);
	if (err)
		return err;

	err = ufshcd_uic_hibern8_exit(hba);
	if (err)
		return err;

	ufshcd_set_link_active(hba);

	err = ufshcd_make_hba_operational(hba);
	if (err)
		return err;

	return 0;
}
/*
 * ufs_mtk_link_set_lpm - put UniPro into low-power mode
 * @hba: host controller instance
 *
 * On failure the UniPro state is restored so the subsequent error
 * recovery starts from a sane state.
 */
static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
	int err;

	err = ufs_mtk_unipro_set_pm(hba, 1);
	if (err) {
		/* Resume UniPro state for following error recovery */
		ufs_mtk_unipro_set_pm(hba, 0);
		return err;
	}

	return 0;
}
495 static int ufs_mtk_suspend(struct ufs_hba
*hba
, enum ufs_pm_op pm_op
)
498 struct ufs_mtk_host
*host
= ufshcd_get_variant(hba
);
500 if (ufshcd_is_link_hibern8(hba
)) {
501 err
= ufs_mtk_link_set_lpm(hba
);
504 * Set link as off state enforcedly to trigger
505 * ufshcd_host_reset_and_restore() in ufshcd_suspend()
506 * for completed host reset.
508 ufshcd_set_link_off(hba
);
513 if (!ufshcd_is_link_active(hba
))
514 phy_power_off(host
->mphy
);
519 static int ufs_mtk_resume(struct ufs_hba
*hba
, enum ufs_pm_op pm_op
)
521 struct ufs_mtk_host
*host
= ufshcd_get_variant(hba
);
524 if (!ufshcd_is_link_active(hba
))
525 phy_power_on(host
->mphy
);
527 if (ufshcd_is_link_hibern8(hba
)) {
528 err
= ufs_mtk_link_set_hpm(hba
);
530 err
= ufshcd_link_recovery(hba
);
538 static void ufs_mtk_dbg_register_dump(struct ufs_hba
*hba
)
540 ufshcd_dump_regs(hba
, REG_UFS_REFCLK_CTRL
, 0x4, "Ref-Clk Ctrl ");
542 ufshcd_dump_regs(hba
, REG_UFS_EXTREG
, 0x4, "Ext Reg ");
544 ufshcd_dump_regs(hba
, REG_UFS_MPHYCTRL
,
545 REG_UFS_REJECT_MON
- REG_UFS_MPHYCTRL
+ 4,
548 /* Direct debugging information to REG_MTK_PROBE */
549 ufshcd_writel(hba
, 0x20, REG_UFS_DEBUG_SEL
);
550 ufshcd_dump_regs(hba
, REG_UFS_PROBE
, 0x4, "Debug Probe ");
553 static int ufs_mtk_apply_dev_quirks(struct ufs_hba
*hba
)
555 struct ufs_dev_info
*dev_info
= &hba
->dev_info
;
556 u16 mid
= dev_info
->wmanufacturerid
;
558 if (mid
== UFS_VENDOR_SAMSUNG
) {
559 hba
->dev_quirks
&= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE
;
560 ufshcd_dme_set(hba
, UIC_ARG_MIB(PA_TACTIVATE
), 6);
564 * Decide waiting time before gating reference clock and
565 * after ungating reference clock according to vendors'
568 if (mid
== UFS_VENDOR_SAMSUNG
)
569 ufs_mtk_setup_ref_clk_wait_us(hba
, 1, 1);
570 else if (mid
== UFS_VENDOR_SKHYNIX
)
571 ufs_mtk_setup_ref_clk_wait_us(hba
, 30, 30);
572 else if (mid
== UFS_VENDOR_TOSHIBA
)
573 ufs_mtk_setup_ref_clk_wait_us(hba
, 100, 32);
579 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
581 * The variant operations configure the necessary controller and PHY
582 * handshake during initialization.
584 static struct ufs_hba_variant_ops ufs_hba_mtk_vops
= {
585 .name
= "mediatek.ufshci",
586 .init
= ufs_mtk_init
,
587 .setup_clocks
= ufs_mtk_setup_clocks
,
588 .hce_enable_notify
= ufs_mtk_hce_enable_notify
,
589 .link_startup_notify
= ufs_mtk_link_startup_notify
,
590 .pwr_change_notify
= ufs_mtk_pwr_change_notify
,
591 .apply_dev_quirks
= ufs_mtk_apply_dev_quirks
,
592 .suspend
= ufs_mtk_suspend
,
593 .resume
= ufs_mtk_resume
,
594 .dbg_register_dump
= ufs_mtk_dbg_register_dump
,
595 .device_reset
= ufs_mtk_device_reset
,
599 * ufs_mtk_probe - probe routine of the driver
600 * @pdev: pointer to Platform device handle
602 * Return zero for success and non-zero for failure
604 static int ufs_mtk_probe(struct platform_device
*pdev
)
607 struct device
*dev
= &pdev
->dev
;
609 /* perform generic probe */
610 err
= ufshcd_pltfrm_init(pdev
, &ufs_hba_mtk_vops
);
612 dev_info(dev
, "probe failed %d\n", err
);
618 * ufs_mtk_remove - set driver_data of the device to NULL
619 * @pdev: pointer to platform device handle
623 static int ufs_mtk_remove(struct platform_device
*pdev
)
625 struct ufs_hba
*hba
= platform_get_drvdata(pdev
);
627 pm_runtime_get_sync(&(pdev
)->dev
);
632 static const struct of_device_id ufs_mtk_of_match
[] = {
633 { .compatible
= "mediatek,mt8183-ufshci"},
637 static const struct dev_pm_ops ufs_mtk_pm_ops
= {
638 .suspend
= ufshcd_pltfrm_suspend
,
639 .resume
= ufshcd_pltfrm_resume
,
640 .runtime_suspend
= ufshcd_pltfrm_runtime_suspend
,
641 .runtime_resume
= ufshcd_pltfrm_runtime_resume
,
642 .runtime_idle
= ufshcd_pltfrm_runtime_idle
,
645 static struct platform_driver ufs_mtk_pltform
= {
646 .probe
= ufs_mtk_probe
,
647 .remove
= ufs_mtk_remove
,
648 .shutdown
= ufshcd_pltfrm_shutdown
,
650 .name
= "ufshcd-mtk",
651 .pm
= &ufs_mtk_pm_ops
,
652 .of_match_table
= ufs_mtk_of_match
,
MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

/* Register the platform driver; generates module init/exit boilerplate. */
module_platform_driver(ufs_mtk_pltform);