drivers/scsi/ufs/ufs-hisi.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon Hixxxx UFS Driver
 *
 * Copyright (c) 2016-2017 Linaro Ltd.
 * Copyright (c) 2016-2017 HiSilicon Technologies Co., Ltd.
 */

#include <linux/time.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-hisi.h"
#include "ufshci.h"
#include "ufs_quirks.h"

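/*
 * Poll MPHY_TX_FSM_STATE on both TX lanes until each reports HIBERN8 or
 * HBRN8_POLL_TOUT_MS expires; the state is re-read once after the timeout
 * in case the polling loop was scheduled out for a long time.
 */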
static int ufs_hisi_check_hibern8(struct ufs_hba *hba)
{
        int err = 0;
        u32 tx_fsm_val_0 = 0;
        u32 tx_fsm_val_1 = 0;
        unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

        do {
                err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
                                     &tx_fsm_val_0);
                err |= ufshcd_dme_get(hba,
                    UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);
                if (err || (tx_fsm_val_0 == TX_FSM_HIBERN8 &&
                        tx_fsm_val_1 == TX_FSM_HIBERN8))
                        break;

                /* sleep for max. 200us */
                usleep_range(100, 200);
        } while (time_before(jiffies, timeout));

        /*
         * we might have scheduled out for long during polling so
         * check the state again.
         */
        if (time_after(jiffies, timeout)) {
                err = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 0),
                                     &tx_fsm_val_0);
                err |= ufshcd_dme_get(hba,
                    UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE, 1), &tx_fsm_val_1);

                if (err) {
                        dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
                                __func__, err);
                } else if (tx_fsm_val_0 != TX_FSM_HIBERN8 ||
                           tx_fsm_val_1 != TX_FSM_HIBERN8) {
                        err = -1;
                        dev_err(hba->dev, "%s: invalid TX_FSM_STATE, lane0 = %d, lane1 = %d\n",
                                __func__, tx_fsm_val_0, tx_fsm_val_1);
                }
        }

        return err;
}

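/*
 * Reference clock setup: gate the PHY reference clock, select the abb
 * clock source, lift the refclk isolation and re-enable the MPHY
 * reference clock.
 */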
static void ufs_hisi_clk_init(struct ufs_hba *hba)
{
        struct ufs_hisi_host *host = ufshcd_get_variant(hba);

        ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
        if (ufs_sys_ctrl_readl(host, PHY_CLK_CTRL) & BIT_SYSCTRL_REF_CLOCK_EN)
                mdelay(1);
        /* use abb clk */
        ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_SRC_SEl, UFS_SYSCTRL);
        ufs_sys_ctrl_clr_bits(host, BIT_UFS_REFCLK_ISO_EN, PHY_ISO_EN);
        /* open mphy ref clk */
        ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
}

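/*
 * SoC-level bring-up: with the controller held in reset, power up the
 * PSW MTCMOS, program the cfg/ref clocks, bypass the clock gates, drop
 * the power-switch and PHY isolation, pulse the device reset line and
 * finally release the controller reset.
 */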
static void ufs_hisi_soc_init(struct ufs_hba *hba)
{
        struct ufs_hisi_host *host = ufshcd_get_variant(hba);
        u32 reg;

        if (!IS_ERR(host->rst))
                reset_control_assert(host->rst);

        /* HC_PSW powerup */
        ufs_sys_ctrl_set_bits(host, BIT_UFS_PSW_MTCMOS_EN, PSW_POWER_CTRL);
        udelay(10);
        /* notify PWR ready */
        ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PWR_READY, HC_LP_CTRL);
        ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | 0,
                UFS_DEVICE_RESET_CTRL);

        reg = ufs_sys_ctrl_readl(host, PHY_CLK_CTRL);
        reg = (reg & ~MASK_SYSCTRL_CFG_CLOCK_FREQ) | UFS_FREQ_CFG_CLK;
        /* set cfg clk freq */
        ufs_sys_ctrl_writel(host, reg, PHY_CLK_CTRL);
        /* set ref clk freq */
        ufs_sys_ctrl_clr_bits(host, MASK_SYSCTRL_REF_CLOCK_SEL, PHY_CLK_CTRL);
        /* bypass ufs clk gate */
        ufs_sys_ctrl_set_bits(host, MASK_UFS_CLK_GATE_BYPASS,
                CLOCK_GATE_BYPASS);
        ufs_sys_ctrl_set_bits(host, MASK_UFS_SYSCRTL_BYPASS, UFS_SYSCTRL);

        /* open psw clk */
        ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_PSW_CLK_EN, PSW_CLK_CTRL);
        /* disable ufshc iso */
        ufs_sys_ctrl_clr_bits(host, BIT_UFS_PSW_ISO_CTRL, PSW_POWER_CTRL);
        /* disable phy iso */
        ufs_sys_ctrl_clr_bits(host, BIT_UFS_PHY_ISO_CTRL, PHY_ISO_EN);
        /* notice iso disable */
        ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_LP_ISOL_EN, HC_LP_CTRL);

        /* disable lp_reset_n */
        ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_LP_RESET_N, RESET_CTRL_EN);
        mdelay(1);

        ufs_sys_ctrl_writel(host, MASK_UFS_DEVICE_RESET | BIT_UFS_DEVICE_RESET,
                UFS_DEVICE_RESET_CTRL);

        msleep(20);

        /*
         * enable the fix of linereset recovery,
         * and enable rx_reset/tx_rest beat
         * enable ref_clk_en override(bit5) &
         * override value = 1(bit4), with mask
         */
        ufs_sys_ctrl_writel(host, 0x03300330, UFS_DEVICE_RESET_CTRL);

        if (!IS_ERR(host->rst))
                reset_control_deassert(host->rst);
}

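/*
 * Pre link-startup programming of UniPro and M-PHY vendor attributes
 * (rate, override and sync-length settings, with extra values for the
 * 10nm PHY), followed by a wait for both TX lanes to reach HIBERN8,
 * HCLK divider setup, and disabling of auto-hibernate, TX LCC and
 * VS_Mk2ExtnSupport.
 */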
static int ufs_hisi_link_startup_pre_change(struct ufs_hba *hba)
{
        struct ufs_hisi_host *host = ufshcd_get_variant(hba);
        int err;
        uint32_t value;
        uint32_t reg;

        /* Unipro VS_mphy_disable */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x1);
        /* PA_HSSeries */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x156A, 0x0), 0x2);
        /* MPHY CBRATESEL */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8114, 0x0), 0x1);
        /* MPHY CBOVRCTRL2 */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8121, 0x0), 0x2D);
        /* MPHY CBOVRCTRL3 */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8122, 0x0), 0x1);

        if (host->caps & UFS_HISI_CAP_PHY10nm) {
                /* MPHY CBOVRCTRL4 */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8127, 0x0), 0x98);
                /* MPHY CBOVRCTRL5 */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8128, 0x0), 0x1);
        }

        /* Unipro VS_MphyCfgUpdt */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
        /* MPHY RXOVRCTRL4 rx0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x4), 0x58);
        /* MPHY RXOVRCTRL4 rx1 */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800D, 0x5), 0x58);
        /* MPHY RXOVRCTRL5 rx0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x4), 0xB);
        /* MPHY RXOVRCTRL5 rx1 */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x800E, 0x5), 0xB);
        /* MPHY RXSQCONTROL rx0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x4), 0x1);
        /* MPHY RXSQCONTROL rx1 */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8009, 0x5), 0x1);
        /* Unipro VS_MphyCfgUpdt */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);

        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x8113, 0x0), 0x1);
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);

        if (host->caps & UFS_HISI_CAP_PHY10nm) {
                /* RX_Hibern8Time_Capability */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x4), 0xA);
                /* RX_Hibern8Time_Capability */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0092, 0x5), 0xA);
                /* RX_Min_ActivateTime */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x4), 0xA);
                /* RX_Min_ActivateTime */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008f, 0x5), 0xA);
        } else {
                /* Tactive RX */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x4), 0x7);
                /* Tactive RX */
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008F, 0x5), 0x7);
        }

        /* Gear3 Synclength */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x4), 0x4F);
        /* Gear3 Synclength */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0095, 0x5), 0x4F);
        /* Gear2 Synclength */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x4), 0x4F);
        /* Gear2 Synclength */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x0094, 0x5), 0x4F);
        /* Gear1 Synclength */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x4), 0x4F);
        /* Gear1 Synclength */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x008B, 0x5), 0x4F);
        /* Thibernate Tx */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x0), 0x5);
        /* Thibernate Tx */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x000F, 0x1), 0x5);

        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD085, 0x0), 0x1);
        /* Unipro VS_mphy_disable */
        ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), &value);
        if (value != 0x1)
                dev_info(hba->dev,
                         "Warning!!! Unipro VS_mphy_disable is 0x%x\n", value);

        /* Unipro VS_mphy_disable */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0C1, 0x0), 0x0);
        err = ufs_hisi_check_hibern8(hba);
        if (err)
                dev_err(hba->dev, "ufs_hisi_check_hibern8 error\n");

        if (!(host->caps & UFS_HISI_CAP_PHY10nm))
                ufshcd_writel(hba, UFS_HCLKDIV_NORMAL_VALUE, UFS_REG_HCLKDIV);

        /* disable auto H8 */
        reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
        reg = reg & (~UFS_AHIT_AH8ITV_MASK);
        ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);

        /* Unipro PA_Local_TX_LCC_Enable */
        ufshcd_disable_host_tx_lcc(hba);
        /* close Unipro VS_Mk2ExtnSupport */
        ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), 0x0);
        ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(0xD0AB, 0x0), &value);
        if (value != 0) {
                /* Ensure close success */
                dev_info(hba->dev, "WARN: close VS_Mk2ExtnSupport failed\n");
        }

        return err;
}

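/*
 * Post link-startup: program the DL threshold attributes, remove the
 * clock-gate bypass set up in ufs_hisi_soc_init() and enable debug
 * counter 0 to count received symbols.
 */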
static int ufs_hisi_link_startup_post_change(struct ufs_hba *hba)
{
        struct ufs_hisi_host *host = ufshcd_get_variant(hba);

        /* Unipro DL_AFC0CreditThreshold */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x2044), 0x0);
        /* Unipro DL_TC0OutAckThreshold */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x2045), 0x0);
        /* Unipro DL_TC0TXFCThreshold */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x2040), 0x9);

        /* not bypass ufs clk gate */
        ufs_sys_ctrl_clr_bits(host, MASK_UFS_CLK_GATE_BYPASS,
                CLOCK_GATE_BYPASS);
        ufs_sys_ctrl_clr_bits(host, MASK_UFS_SYSCRTL_BYPASS,
                UFS_SYSCTRL);

        /* select received symbol cnt */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09a), 0x80000000);
        /* reset counter0 and enable */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0xd09c), 0x00000005);

        return 0;
}

static int ufs_hisi_link_startup_notify(struct ufs_hba *hba,
                                        enum ufs_notify_change_status status)
{
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                err = ufs_hisi_link_startup_pre_change(hba);
                break;
        case POST_CHANGE:
                err = ufs_hisi_link_startup_post_change(hba);
                break;
        default:
                break;
        }

        return err;
}

static void ufs_hisi_set_dev_cap(struct ufs_dev_params *hisi_param)
{
        hisi_param->rx_lanes = UFS_HISI_LIMIT_NUM_LANES_RX;
        hisi_param->tx_lanes = UFS_HISI_LIMIT_NUM_LANES_TX;
        hisi_param->hs_rx_gear = UFS_HISI_LIMIT_HSGEAR_RX;
        hisi_param->hs_tx_gear = UFS_HISI_LIMIT_HSGEAR_TX;
        hisi_param->pwm_rx_gear = UFS_HISI_LIMIT_PWMGEAR_RX;
        hisi_param->pwm_tx_gear = UFS_HISI_LIMIT_PWMGEAR_TX;
        hisi_param->rx_pwr_pwm = UFS_HISI_LIMIT_RX_PWR_PWM;
        hisi_param->tx_pwr_pwm = UFS_HISI_LIMIT_TX_PWR_PWM;
        hisi_param->rx_pwr_hs = UFS_HISI_LIMIT_RX_PWR_HS;
        hisi_param->tx_pwr_hs = UFS_HISI_LIMIT_TX_PWR_HS;
        hisi_param->hs_rate = UFS_HISI_LIMIT_HS_RATE;
        hisi_param->desired_working_mode = UFS_HISI_LIMIT_DESIRED_MODE;
}

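/*
 * Attributes programmed before a power mode change: vendor timeouts and
 * PA_PWRModeUserData values, plus SaveConfigTime/sync-length tweaks for
 * the 10nm PHY and for devices with the VS_DebugSaveConfigTime quirk.
 */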
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
{
        struct ufs_hisi_host *host = ufshcd_get_variant(hba);

        if (host->caps & UFS_HISI_CAP_PHY10nm) {
                /*
                 * Boston platform needs to set SaveConfigTime to 0x13,
                 * and change sync length to maximum value
                 */
                /* VS_DebugSaveConfigTime */
                ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0xD0A0), 0x13);
                /* g1 sync length */
                ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1552), 0x4f);
                /* g2 sync length */
                ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1554), 0x4f);
                /* g3 sync length */
                ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x1556), 0x4f);
                /* PA_Hibern8Time */
                ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a7), 0xA);
                /* PA_Tactivate */
                ufshcd_dme_set(hba, UIC_ARG_MIB((u32)0x15a8), 0xA);
                ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0xd085, 0x0), 0x01);
        }

        if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) {
                pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n");
                /* VS_DebugSaveConfigTime */
                ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10);
                /* sync length */
                ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48);
        }

        /* update */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1);
        /* PA_TxSkip */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x155c), 0x0);
        /* PA_PWRModeUserData0 = 8191, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b0), 8191);
        /* PA_PWRModeUserData1 = 65535, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b1), 65535);
        /* PA_PWRModeUserData2 = 32767, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b2), 32767);
        /* DME_FC0ProtectionTimeOutVal = 8191, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0xd041), 8191);
        /* DME_TC0ReplayTimeOutVal = 65535, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0xd042), 65535);
        /* DME_AFC0ReqTimeOutVal = 32767, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0xd043), 32767);
        /* PA_PWRModeUserData3 = 8191, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b3), 8191);
        /* PA_PWRModeUserData4 = 65535, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b4), 65535);
        /* PA_PWRModeUserData5 = 32767, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0x15b5), 32767);
        /* DME_FC1ProtectionTimeOutVal = 8191, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0xd044), 8191);
        /* DME_TC1ReplayTimeOutVal = 65535, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0xd045), 65535);
        /* DME_AFC1ReqTimeOutVal = 32767, default is 0 */
        ufshcd_dme_set(hba, UIC_ARG_MIB(0xd046), 32767);
}

static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
                                      enum ufs_notify_change_status status,
                                      struct ufs_pa_layer_attr *dev_max_params,
                                      struct ufs_pa_layer_attr *dev_req_params)
{
        struct ufs_dev_params ufs_hisi_cap;
        int ret = 0;

        if (!dev_req_params) {
                dev_err(hba->dev,
                        "%s: incoming dev_req_params is NULL\n", __func__);
                ret = -EINVAL;
                goto out;
        }

        switch (status) {
        case PRE_CHANGE:
                ufs_hisi_set_dev_cap(&ufs_hisi_cap);
                ret = ufshcd_get_pwr_dev_param(&ufs_hisi_cap,
                                               dev_max_params, dev_req_params);
                if (ret) {
                        dev_err(hba->dev,
                                "%s: failed to determine capabilities\n", __func__);
                        goto out;
                }

                ufs_hisi_pwr_change_pre_change(hba);
                break;
        case POST_CHANGE:
                break;
        default:
                ret = -EINVAL;
                break;
        }
out:
        return ret;
}

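/*
 * Suspend/resume only act on system PM; runtime PM requests return
 * immediately. Suspend gates the reference clock and clears the PHY PCS
 * ref_dig_clk override, resume reverses both, and host->in_suspend
 * guards against repeated suspend calls.
 */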
static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_hisi_host *host = ufshcd_get_variant(hba);

        if (ufshcd_is_runtime_pm(pm_op))
                return 0;

        if (host->in_suspend) {
                WARN_ON(1);
                return 0;
        }

        ufs_sys_ctrl_clr_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);
        udelay(10);
        /* set ref_dig_clk override of PHY PCS to 0 */
        ufs_sys_ctrl_writel(host, 0x00100000, UFS_DEVICE_RESET_CTRL);

        host->in_suspend = true;

        return 0;
}

static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
        struct ufs_hisi_host *host = ufshcd_get_variant(hba);

        if (!host->in_suspend)
                return 0;

        /* set ref_dig_clk override of PHY PCS to 1 */
        ufs_sys_ctrl_writel(host, 0x00100010, UFS_DEVICE_RESET_CTRL);
        udelay(10);
        ufs_sys_ctrl_set_bits(host, BIT_SYSCTRL_REF_CLOCK_EN, PHY_CLK_CTRL);

        host->in_suspend = false;
        return 0;
}

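/* Map the second MMIO region: the UFS system-control registers. */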
static int ufs_hisi_get_resource(struct ufs_hisi_host *host)
{
        struct device *dev = host->hba->dev;
        struct platform_device *pdev = to_platform_device(dev);

        /* get resource of ufs sys ctrl */
        host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1);
        return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl);
}

static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba)
{
        hba->rpm_lvl = UFS_PM_LVL_1;
        hba->spm_lvl = UFS_PM_LVL_3;
}

/**
 * ufs_hisi_init_common
 * @hba: host controller instance
 */
static int ufs_hisi_init_common(struct ufs_hba *hba)
{
        int err = 0;
        struct device *dev = hba->dev;
        struct ufs_hisi_host *host;

        host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        host->hba = hba;
        ufshcd_set_variant(hba, host);

        host->rst = devm_reset_control_get(dev, "rst");
        if (IS_ERR(host->rst)) {
                dev_err(dev, "%s: failed to get reset control\n", __func__);
                return PTR_ERR(host->rst);
        }

        ufs_hisi_set_pm_lvl(hba);

        err = ufs_hisi_get_resource(host);
        if (err) {
                ufshcd_set_variant(hba, NULL);
                return err;
        }

        return 0;
}

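/*
 * Variant init: hi3660 and hi3670 share the common init, clock and SoC
 * bring-up; hi3670 additionally sets the 10nm PHY capability bit.
 */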
static int ufs_hi3660_init(struct ufs_hba *hba)
{
        int ret = 0;
        struct device *dev = hba->dev;

        ret = ufs_hisi_init_common(hba);
        if (ret) {
                dev_err(dev, "%s: ufs common init fail\n", __func__);
                return ret;
        }

        ufs_hisi_clk_init(hba);

        ufs_hisi_soc_init(hba);

        return 0;
}

static int ufs_hi3670_init(struct ufs_hba *hba)
{
        int ret = 0;
        struct device *dev = hba->dev;
        struct ufs_hisi_host *host;

        ret = ufs_hisi_init_common(hba);
        if (ret) {
                dev_err(dev, "%s: ufs common init fail\n", __func__);
                return ret;
        }

        ufs_hisi_clk_init(hba);

        ufs_hisi_soc_init(hba);

        /* Add cap for 10nm PHY variant on HI3670 SoC */
        host = ufshcd_get_variant(hba);
        host->caps |= UFS_HISI_CAP_PHY10nm;

        return 0;
}

static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
        .name = "hi3660",
        .init = ufs_hi3660_init,
        .link_startup_notify = ufs_hisi_link_startup_notify,
        .pwr_change_notify = ufs_hisi_pwr_change_notify,
        .suspend = ufs_hisi_suspend,
        .resume = ufs_hisi_resume,
};

static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
        .name = "hi3670",
        .init = ufs_hi3670_init,
        .link_startup_notify = ufs_hisi_link_startup_notify,
        .pwr_change_notify = ufs_hisi_pwr_change_notify,
        .suspend = ufs_hisi_suspend,
        .resume = ufs_hisi_resume,
};

static const struct of_device_id ufs_hisi_of_match[] = {
        { .compatible = "hisilicon,hi3660-ufs", .data = &ufs_hba_hi3660_vops },
        { .compatible = "hisilicon,hi3670-ufs", .data = &ufs_hba_hi3670_vops },
        {},
};

MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);

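/*
 * Probe picks the variant ops from the compatible match and hands off
 * to ufshcd_pltfrm_init(); remove tears down via ufshcd_remove().
 */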
static int ufs_hisi_probe(struct platform_device *pdev)
{
        const struct of_device_id *of_id;

        of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);

        return ufshcd_pltfrm_init(pdev, of_id->data);
}

static int ufs_hisi_remove(struct platform_device *pdev)
{
        struct ufs_hba *hba = platform_get_drvdata(pdev);

        ufshcd_remove(hba);
        return 0;
}

static const struct dev_pm_ops ufs_hisi_pm_ops = {
        .suspend = ufshcd_pltfrm_suspend,
        .resume = ufshcd_pltfrm_resume,
        .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
        .runtime_resume = ufshcd_pltfrm_runtime_resume,
        .runtime_idle = ufshcd_pltfrm_runtime_idle,
};

static struct platform_driver ufs_hisi_pltform = {
        .probe = ufs_hisi_probe,
        .remove = ufs_hisi_remove,
        .shutdown = ufshcd_pltfrm_shutdown,
        .driver = {
                .name = "ufshcd-hisi",
                .pm = &ufs_hisi_pm_ops,
                .of_match_table = of_match_ptr(ufs_hisi_of_match),
        },
};
module_platform_driver(ufs_hisi_pltform);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ufshcd-hisi");
MODULE_DESCRIPTION("HiSilicon Hixxxx UFS Driver");