// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 MediaTek Inc.
 * Authors:
 *	Stanley Chu <stanley.chu@mediatek.com>
 *	Peter Wang <peter.wang@mediatek.com>
 */
#include <linux/arm-smccc.h>
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-mediatek.h"
#define ufs_mtk_smc(cmd, val, res) \
        arm_smccc_smc(MTK_SIP_UFS_CONTROL, \
                      cmd, val, 0, 0, 0, 0, 0, &(res))

#define ufs_mtk_ref_clk_notify(on, res) \
        ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res)

#define ufs_mtk_device_reset_ctrl(high, res) \
        ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res)
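
/*
 * Toggle UniPro local clock gating through the vendor-specific
 * VS_SAVEPOWERCONTROL and VS_DEBUGCLOCKENABLE DME attributes.
 */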
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
        u32 tmp;

        if (enable) {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp |
                      (1 << RX_SYMBOL_CLK_GATE_EN) |
                      (1 << SYS_CLK_GATE_EN) |
                      (1 << TX_CLK_GATE_EN);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp & ~(1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        } else {
                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
                tmp = tmp & ~((1 << RX_SYMBOL_CLK_GATE_EN) |
                              (1 << SYS_CLK_GATE_EN) |
                              (1 << TX_CLK_GATE_EN));
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

                ufshcd_dme_get(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), &tmp);
                tmp = tmp | (1 << TX_SYMBOL_CLK_REQ_FORCE);
                ufshcd_dme_set(hba,
                               UIC_ARG_MIB(VS_DEBUGCLOCKENABLE), tmp);
        }
}
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (status == PRE_CHANGE) {
                if (host->unipro_lpm)
                        hba->hba_enable_delay_us = 0;
                else
                        hba->hba_enable_delay_us = 600;
        }

        return 0;
}
static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct device *dev = hba->dev;
        struct device_node *np = dev->of_node;
        int err = 0;

        host->mphy = devm_of_phy_get_by_index(dev, np, 0);

        if (host->mphy == ERR_PTR(-EPROBE_DEFER)) {
                /*
                 * The UFS driver might be probed before the phy driver.
                 * In that case, return -EPROBE_DEFER so probing is
                 * retried once the phy is available.
                 */
                err = -EPROBE_DEFER;
                dev_info(dev,
                         "%s: required phy hasn't probed yet. err = %d\n",
                         __func__, err);
        } else if (IS_ERR(host->mphy)) {
                err = PTR_ERR(host->mphy);
                dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
        }

        if (err)
                host->mphy = NULL;

        return err;
}
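
/*
 * Reference-clock handshake: write the request bit to REG_UFS_REFCLK_CTRL,
 * then poll until the hardware's ack bit (one position above the request
 * bit, hence the shift below) matches the request. Secure firmware is
 * notified before ungating and after gating so it can sequence the
 * external reference clock accordingly.
 */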
static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        struct arm_smccc_res res;
        unsigned long timeout;
        u32 value;

        if (host->ref_clk_enabled == on)
                return 0;

        if (on) {
                ufs_mtk_ref_clk_notify(on, res);
                ufshcd_delay_us(host->ref_clk_ungating_wait_us, 10);
                ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL);
        } else {
                ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL);
        }

        /* Wait for ack */
        timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS);
        do {
                value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);

                /* Wait until the ack bit matches the request bit */
                if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST))
                        goto out;

                usleep_range(100, 200);
        } while (time_before(jiffies, timeout));

        dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);

        ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res);

        return -ETIMEDOUT;

out:
        host->ref_clk_enabled = on;
        if (!on) {
                ufshcd_delay_us(host->ref_clk_gating_wait_us, 10);
                ufs_mtk_ref_clk_notify(on, res);
        }

        return 0;
}
static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba,
                                          u16 gating_us, u16 ungating_us)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (hba->dev_info.clk_gating_wait_us) {
                host->ref_clk_gating_wait_us =
                        hba->dev_info.clk_gating_wait_us;
        } else {
                host->ref_clk_gating_wait_us = gating_us;
        }

        host->ref_clk_ungating_wait_us = ungating_us;
}
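
/*
 * Route the link-state probe signals to REG_UFS_PROBE via the debug
 * selector; the link power state is carried in the register's upper
 * four bits (hence the shift by 28).
 */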
static u32 ufs_mtk_link_get_state(struct ufs_hba *hba)
{
        u32 val;

        ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        val = ufshcd_readl(hba, REG_UFS_PROBE);
        val = val >> 28;

        return val;
}
/**
 * ufs_mtk_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: if true, enable clocks, otherwise disable them
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
                                enum ufs_notify_change_status status)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        int ret = 0;

        /*
         * In case ufs_mtk_init() is not yet done, simply ignore.
         * This ufs_mtk_setup_clocks() shall be called from
         * ufs_mtk_init() after init is done.
         */
        if (!host)
                return 0;

        if (!on && status == PRE_CHANGE) {
                if (!ufshcd_is_link_active(hba)) {
                        ufs_mtk_setup_ref_clk(hba, on);
                        ret = phy_power_off(host->mphy);
                } else {
                        /*
                         * Gate the ref-clk if the link is in Hibern8
                         * entered via Auto-Hibern8.
                         */
                        if (!ufshcd_can_hibern8_during_gating(hba) &&
                            ufshcd_is_auto_hibern8_enabled(hba) &&
                            ufs_mtk_link_get_state(hba) ==
                            VS_LINK_HIBERN8)
                                ufs_mtk_setup_ref_clk(hba, on);
                }
        } else if (on && status == POST_CHANGE) {
                ret = phy_power_on(host->mphy);
                ufs_mtk_setup_ref_clk(hba, on);
        }

        return ret;
}
/**
 * ufs_mtk_init - find other essential mmio bases
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_mtk_init(struct ufs_hba *hba)
{
        struct ufs_mtk_host *host;
        struct device *dev = hba->dev;
        int err = 0;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host) {
                err = -ENOMEM;
                dev_info(dev, "%s: no memory for mtk ufs host\n", __func__);
                goto out;
        }

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        err = ufs_mtk_bind_mphy(hba);
        if (err)
                goto out_variant_clear;

        /* Enable runtime autosuspend */
        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        /* Enable clock-gating */
        hba->caps |= UFSHCD_CAP_CLK_GATING;

        /*
         * ufshcd_vops_init() is invoked after
         * ufshcd_setup_clock(true) in ufshcd_hba_init() thus
         * phy clock setup is skipped.
         *
         * Enable phy clocks specifically here.
         */
        ufs_mtk_setup_clocks(hba, true, POST_CHANGE);

        goto out;

out_variant_clear:
        ufshcd_set_variant(hba, NULL);
out:
        return err;
}
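
/*
 * Power-mode negotiation: fill in the host's capability limits (lane
 * count, gear, rate and power modes) and let ufshcd_get_pwr_dev_param()
 * intersect them with the device's maximum to produce the parameters
 * actually requested.
 */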
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
                                  struct ufs_pa_layer_attr *dev_max_params,
                                  struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_dev_params host_cap;
        int ret;

        host_cap.tx_lanes = UFS_MTK_LIMIT_NUM_LANES_TX;
        host_cap.rx_lanes = UFS_MTK_LIMIT_NUM_LANES_RX;
        host_cap.hs_rx_gear = UFS_MTK_LIMIT_HSGEAR_RX;
        host_cap.hs_tx_gear = UFS_MTK_LIMIT_HSGEAR_TX;
        host_cap.pwm_rx_gear = UFS_MTK_LIMIT_PWMGEAR_RX;
        host_cap.pwm_tx_gear = UFS_MTK_LIMIT_PWMGEAR_TX;
        host_cap.rx_pwr_pwm = UFS_MTK_LIMIT_RX_PWR_PWM;
        host_cap.tx_pwr_pwm = UFS_MTK_LIMIT_TX_PWR_PWM;
        host_cap.rx_pwr_hs = UFS_MTK_LIMIT_RX_PWR_HS;
        host_cap.tx_pwr_hs = UFS_MTK_LIMIT_TX_PWR_HS;
        host_cap.hs_rate = UFS_MTK_LIMIT_HS_RATE;
        host_cap.desired_working_mode =
                                UFS_MTK_LIMIT_DESIRED_MODE;

        ret = ufshcd_get_pwr_dev_param(&host_cap,
                                       dev_max_params,
                                       dev_req_params);
        if (ret) {
                pr_info("%s: failed to determine capabilities\n",
                        __func__);
        }

        return ret;
}
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
                                     enum ufs_notify_change_status stage,
                                     struct ufs_pa_layer_attr *dev_max_params,
                                     struct ufs_pa_layer_attr *dev_req_params)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
                                             dev_req_params);
                break;
        case POST_CHANGE:
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
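
/*
 * VS_UNIPROPOWERDOWNCONTROL switches the UniPro stack between normal
 * operation (0) and a low-power/power-down state (1). The last value
 * written is cached in host->unipro_lpm so ufs_mtk_hce_enable_notify()
 * can skip the host-enable delay when UniPro is already in LPM.
 */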
static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
{
        int ret;
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        ret = ufshcd_dme_set(hba,
                             UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
                             lpm);
        if (!ret)
                host->unipro_lpm = lpm;

        return ret;
}
static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
        int ret;
        u32 tmp;

        ufs_mtk_unipro_set_pm(hba, 0);

        /*
         * Setting PA_Local_TX_LCC_Enable to 0 before link startup
         * to make sure that both host and device TX LCC are disabled
         * once link startup is completed.
         */
        ret = ufshcd_disable_host_tx_lcc(hba);
        if (ret)
                return ret;

        /* disable deep stall */
        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp);
        if (ret)
                return ret;

        tmp &= ~(1 << 6);

        ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);

        return ret;
}
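
/*
 * Clock-gating delay: keep the gating timeout a little longer than the
 * Auto-Hibern8 idle timer (ah_ms + 5 ms) so clocks are not gated before
 * the link has had a chance to enter Hibern8 on its own.
 */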
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
        unsigned long flags;
        u32 ah_ms;

        if (ufshcd_is_clkgating_allowed(hba)) {
                if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
                        ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
                                          hba->ahit);
                else
                        ah_ms = 10;
                spin_lock_irqsave(hba->host->host_lock, flags);
                hba->clk_gating.delay_ms = ah_ms + 5;
                spin_unlock_irqrestore(hba->host->host_lock, flags);
        }
}
static int ufs_mtk_post_link(struct ufs_hba *hba)
{
        /* enable unipro clock gating feature */
        ufs_mtk_cfg_unipro_cg(hba, true);

        /* configure auto-hibern8 timer to 10ms */
        if (ufshcd_is_auto_hibern8_supported(hba)) {
                ufshcd_auto_hibern8_update(hba,
                        FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
                        FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3));
        }

        ufs_mtk_setup_clk_gating(hba);

        return 0;
}
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status stage)
{
        int ret = 0;

        switch (stage) {
        case PRE_CHANGE:
                ret = ufs_mtk_pre_link(hba);
                break;
        case POST_CHANGE:
                ret = ufs_mtk_post_link(hba);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
static void ufs_mtk_device_reset(struct ufs_hba *hba)
{
        struct arm_smccc_res res;

        ufs_mtk_device_reset_ctrl(0, res);

        /*
         * The reset signal is active low. UFS devices shall detect an
         * RST_n pulse width of 1 us or more, of either polarity.
         *
         * To be on the safe side, keep the reset low for at least 10 us.
         */
        usleep_range(10, 15);

        ufs_mtk_device_reset_ctrl(1, res);

        /* Some devices may need time to respond to rst_n */
        usleep_range(10000, 15000);

        dev_info(hba->dev, "device reset done\n");
}
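
/*
 * Link power-mode transitions used by suspend/resume: set_hpm() brings
 * the link back from Hibern8 (re-enable the host, wake UniPro, exit
 * Hibern8, then make the HBA operational); set_lpm() powers UniPro down,
 * restoring the previous state if that fails so error recovery can run.
 */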
static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
{
        int err;

        err = ufshcd_hba_enable(hba);
        if (err)
                return err;

        err = ufs_mtk_unipro_set_pm(hba, 0);
        if (err)
                return err;

        err = ufshcd_uic_hibern8_exit(hba);
        if (err)
                return err;

        ufshcd_set_link_active(hba);

        err = ufshcd_make_hba_operational(hba);
        if (err)
                return err;

        return 0;
}
static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
        int err;

        err = ufs_mtk_unipro_set_pm(hba, 1);
        if (err) {
                /* Resume UniPro state for following error recovery */
                ufs_mtk_unipro_set_pm(hba, 0);
                return err;
        }

        return 0;
}
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        int err;
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_lpm(hba);
                if (err) {
                        /*
                         * Force the link into the off state so that
                         * ufshcd_suspend() triggers
                         * ufshcd_host_reset_and_restore() for a complete
                         * host reset.
                         */
                        ufshcd_set_link_off(hba);
                        return -EAGAIN;
                }
        }

        if (!ufshcd_is_link_active(hba))
                phy_power_off(host->mphy);

        return 0;
}
static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_mtk_host *host = ufshcd_get_variant(hba);
        int err;

        if (!ufshcd_is_link_active(hba))
                phy_power_on(host->mphy);

        if (ufshcd_is_link_hibern8(hba)) {
                err = ufs_mtk_link_set_hpm(hba);
                if (err) {
                        err = ufshcd_link_recovery(hba);
                        return err;
                }
        }

        return 0;
}
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
{
        ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl ");

        ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg ");

        ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL,
                         REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4,
                         "MPHY Ctrl ");

        /* Direct debugging information to REG_MTK_PROBE */
        ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL);
        ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe ");
}
static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
{
        struct ufs_dev_info *dev_info = &hba->dev_info;
        u16 mid = dev_info->wmanufacturerid;

        if (mid == UFS_VENDOR_SAMSUNG) {
                hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
                ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6);
        }

        /*
         * Decide the waiting time before gating the reference clock and
         * after ungating it, according to each vendor's requirements.
         */
        if (mid == UFS_VENDOR_SAMSUNG)
                ufs_mtk_setup_ref_clk_wait_us(hba, 1, 1);
        else if (mid == UFS_VENDOR_SKHYNIX)
                ufs_mtk_setup_ref_clk_wait_us(hba, 30, 30);
        else if (mid == UFS_VENDOR_TOSHIBA)
                ufs_mtk_setup_ref_clk_wait_us(hba, 100, 32);

        return 0;
}
/**
 * struct ufs_hba_mtk_vops - UFS MTK specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
        .name                = "mediatek.ufshci",
        .init                = ufs_mtk_init,
        .setup_clocks        = ufs_mtk_setup_clocks,
        .hce_enable_notify   = ufs_mtk_hce_enable_notify,
        .link_startup_notify = ufs_mtk_link_startup_notify,
        .pwr_change_notify   = ufs_mtk_pwr_change_notify,
        .apply_dev_quirks    = ufs_mtk_apply_dev_quirks,
        .suspend             = ufs_mtk_suspend,
        .resume              = ufs_mtk_resume,
        .dbg_register_dump   = ufs_mtk_dbg_register_dump,
        .device_reset        = ufs_mtk_device_reset,
};
/**
 * ufs_mtk_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_mtk_probe(struct platform_device *pdev)
{
        int err;
        struct device *dev = &pdev->dev;

        /* perform generic probe */
        err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
        if (err)
                dev_info(dev, "probe failed %d\n", err);

        return err;
}
/**
 * ufs_mtk_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always return 0
 */
static int ufs_mtk_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        /*
         * Hold a runtime PM reference so the device stays powered
         * while the host is torn down.
         */
        pm_runtime_get_sync(&pdev->dev);
        ufshcd_remove(hba);
        return 0;
}
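
/*
 * A minimal sketch of a device-tree node this driver would match; the
 * unit address and property values are hypothetical and depend on the
 * platform DTS and binding:
 *
 *	ufshci@11270000 {
 *		compatible = "mediatek,mt8183-ufshci";
 *		reg = <0x11270000 0x2300>;
 *		phys = <&ufsphy>;
 *	};
 */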
static const struct of_device_id ufs_mtk_of_match[] = {
        { .compatible = "mediatek,mt8183-ufshci"},
        {},
};
static const struct dev_pm_ops ufs_mtk_pm_ops = {
        .suspend         = ufshcd_pltfrm_suspend,
        .resume          = ufshcd_pltfrm_resume,
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume  = ufshcd_pltfrm_runtime_resume,
        .runtime_idle    = ufshcd_pltfrm_runtime_idle,
};
static struct platform_driver ufs_mtk_pltform = {
        .probe      = ufs_mtk_probe,
        .remove     = ufs_mtk_remove,
        .shutdown   = ufshcd_pltfrm_shutdown,
        .driver = {
                .name   = "ufshcd-mtk",
                .pm     = &ufs_mtk_pm_ops,
                .of_match_table = ufs_mtk_of_match,
        },
};
MODULE_AUTHOR("Stanley Chu <stanley.chu@mediatek.com>");
MODULE_AUTHOR("Peter Wang <peter.wang@mediatek.com>");
MODULE_DESCRIPTION("MediaTek UFS Host Driver");
MODULE_LICENSE("GPL v2");

module_platform_driver(ufs_mtk_pltform);