drivers/scsi/ufs/ufs-qcom.c
/*
 * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/time.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
#include "ufs_quirks.h"
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
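
/*
 * Major selectors for the internal debug test bus. Each value picks which
 * hardware sub-module's signals ufs_qcom_testbus_config() routes onto the
 * test bus registers.
 */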
enum {
	TSTBUS_UAWM,
	TSTBUS_UARM,
	TSTBUS_TXUC,
	TSTBUS_RXUC,
	TSTBUS_DFC,
	TSTBUS_TRLUT,
	TSTBUS_TMRLUT,
	TSTBUS_OCSC,
	TSTBUS_UTP_HCI,
	TSTBUS_COMBINED,
	TSTBUS_WRAPPER,
	TSTBUS_UNIPRO,
	TSTBUS_MAX,
};
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles);
static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
				       const char *prefix, void *priv)
{
	ufshcd_dump_regs(hba, offset, len * 4, prefix);
}
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
{
	int err = 0;

	err = ufshcd_dme_get(hba,
			UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: couldn't read PA_CONNECTEDTXDATALANES %d\n",
				__func__, err);

	return err;
}
static int ufs_qcom_host_clk_get(struct device *dev,
		const char *name, struct clk **clk_out)
{
	struct clk *clk;
	int err = 0;

	clk = devm_clk_get(dev, name);
	if (IS_ERR(clk)) {
		err = PTR_ERR(clk);
		dev_err(dev, "%s: failed to get %s err %d",
				__func__, name, err);
	} else {
		*clk_out = clk;
	}

	return err;
}
static int ufs_qcom_host_clk_enable(struct device *dev,
		const char *name, struct clk *clk)
{
	int err = 0;

	err = clk_prepare_enable(clk);
	if (err)
		dev_err(dev, "%s: %s enable failed %d\n", __func__, name, err);

	return err;
}
static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
{
	if (!host->is_lane_clks_enabled)
		return;

	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->tx_l1_sync_clk);
	clk_disable_unprepare(host->tx_l0_sync_clk);
	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->rx_l1_sync_clk);
	clk_disable_unprepare(host->rx_l0_sync_clk);

	host->is_lane_clks_enabled = false;
}
static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	if (host->is_lane_clks_enabled)
		return 0;

	err = ufs_qcom_host_clk_enable(dev, "rx_lane0_sync_clk",
		host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_enable(dev, "tx_lane0_sync_clk",
		host->tx_l0_sync_clk);
	if (err)
		goto disable_rx_l0;

	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
			host->rx_l1_sync_clk);
		if (err)
			goto disable_tx_l0;

		err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
			host->tx_l1_sync_clk);
		if (err)
			goto disable_rx_l1;
	}

	host->is_lane_clks_enabled = true;
	goto out;

disable_rx_l1:
	if (host->hba->lanes_per_direction > 1)
		clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
	clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
	clk_disable_unprepare(host->rx_l0_sync_clk);
out:
	return err;
}
static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
{
	int err = 0;
	struct device *dev = host->hba->dev;

	err = ufs_qcom_host_clk_get(dev,
			"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
	if (err)
		goto out;

	err = ufs_qcom_host_clk_get(dev,
			"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
	if (err)
		goto out;

	/* In case of single lane per direction, don't read lane1 clocks */
	if (host->hba->lanes_per_direction > 1) {
		err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
			&host->rx_l1_sync_clk);
		if (err)
			goto out;

		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
			&host->tx_l1_sync_clk);
	}
out:
	return err;
}
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	u32 tx_lanes;
	int err = 0;

	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
	if (err)
		goto out;

	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
	if (err)
		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
			__func__);

out:
	return err;
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
{
	int err;
	u32 tx_fsm_val = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(HBRN8_POLL_TOUT_MS);

	do {
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);
		if (err || tx_fsm_val == TX_FSM_HIBERN8)
			break;

		/* sleep for max. 200us */
		usleep_range(100, 200);
	} while (time_before(jiffies, timeout));

	/*
	 * We might have been scheduled out for a long time during polling,
	 * so check the state again.
	 */
	if (time_after(jiffies, timeout))
		err = ufshcd_dme_get(hba,
				UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
				&tx_fsm_val);

	if (err) {
		dev_err(hba->dev, "%s: unable to get TX_FSM_STATE, err %d\n",
				__func__, err);
	} else if (tx_fsm_val != TX_FSM_HIBERN8) {
		err = tx_fsm_val;
		dev_err(hba->dev, "%s: invalid TX_FSM_STATE = %d\n",
				__func__, err);
	}

	return err;
}
static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
{
	ufshcd_rmwl(host->hba, QUNIPRO_SEL,
		    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
		    REG_UFS_CFG1);
	/* make sure above configuration is applied before we return */
	mb();
}
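
/*
 * Note: ufshcd_rmwl(hba, mask, val, reg) read-modify-writes a host controller
 * register, clearing the bits in @mask and OR-ing in @val. The call above
 * therefore either sets or clears QUNIPRO_SEL in REG_UFS_CFG1, depending on
 * whether the controller advertises the QUniPro capability.
 */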
static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;
	bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B)
							? true : false;

	if (is_rate_B)
		phy_set_mode(phy, PHY_MODE_UFS_HS_B);

	/* Assert PHY reset and apply PHY calibration values */
	ufs_qcom_assert_reset(hba);
	/* provide 1ms delay to let the reset pulse propagate */
	usleep_range(1000, 1100);

	/* phy initialization - calibrate the phy */
	ret = phy_init(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		goto out;
	}

	/* De-assert PHY reset and start serdes */
	ufs_qcom_deassert_reset(hba);

	/*
	 * after reset deassertion, phy will need all ref clocks,
	 * voltage, current to settle down before starting serdes.
	 */
	usleep_range(1000, 1100);

	/* power on phy - start serdes and phy's power and clocks */
	ret = phy_power_on(phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy power on failed, ret = %d\n",
			__func__, ret);
		goto out_disable_phy;
	}

	ufs_qcom_select_unipro_mode(host);

	return 0;

out_disable_phy:
	ufs_qcom_assert_reset(hba);
	phy_exit(phy);
out:
	return ret;
}
/*
 * The UTP controller has a number of internal clock gating cells (CGCs).
 * Internal hardware sub-modules within the UTP controller control the CGCs.
 * Hardware CGCs disable the clock to inactivate UTP sub-modules not involved
 * in a specific operation. UTP controller CGCs are by default disabled and
 * this function enables them (after every UFS link startup) to save some
 * power leakage.
 */
static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
	ufshcd_writel(hba,
		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
		REG_UFS_CFG2);

	/* Ensure that HW clock gating is enabled before next operations */
	mb();
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_power_up_sequence(hba);
		/*
		 * The PHY PLL output is the source of tx/rx lane symbol
		 * clocks, hence, enable the lane clocks only after PHY
		 * is initialized.
		 */
		err = ufs_qcom_enable_lane_clks(host);
		break;
	case POST_CHANGE:
		/* check if UFS PHY moved from DISABLED to HIBERN8 */
		err = ufs_qcom_check_hibern8(hba);
		ufs_qcom_enable_hw_clk_gating(hba);

		break;
	default:
		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
		err = -EINVAL;
		break;
	}
	return err;
}
/*
 * Returns zero for success and non-zero in case of a failure
 */
static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
			       u32 hs, u32 rate, bool update_link_startup_timer)
{
	int ret = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_clk_info *clki;
	u32 core_clk_period_in_ns;
	u32 tx_clk_cycles_per_us = 0;
	unsigned long core_clk_rate = 0;
	u32 core_clk_cycles_per_us = 0;

	static u32 pwm_fr_table[][2] = {
		{UFS_PWM_G1, 0x1},
		{UFS_PWM_G2, 0x1},
		{UFS_PWM_G3, 0x1},
		{UFS_PWM_G4, 0x1},
	};

	static u32 hs_fr_table_rA[][2] = {
		{UFS_HS_G1, 0x1F},
		{UFS_HS_G2, 0x3e},
		{UFS_HS_G3, 0x7D},
	};

	static u32 hs_fr_table_rB[][2] = {
		{UFS_HS_G1, 0x24},
		{UFS_HS_G2, 0x49},
		{UFS_HS_G3, 0x92},
	};
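
	/*
	 * Each table entry above maps a gear to the TX symbol clock cycles
	 * per microsecond that get programmed into the low bits of
	 * REG_UFS_TX_SYMBOL_CLK_NS_US below; only the second column is used,
	 * the lookup being simply table[gear - 1][1].
	 */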
	/*
	 * The Qunipro controller does not use following registers:
	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
	 * UFS_REG_PA_LINK_STARTUP_TIMER
	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
	 * Aggregation logic.
	 */
	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
		goto out;

	if (gear == 0) {
		dev_err(hba->dev, "%s: invalid gear = %d\n", __func__, gear);
		goto out_error;
	}

	list_for_each_entry(clki, &hba->clk_list_head, list) {
		if (!strcmp(clki->name, "core_clk"))
			core_clk_rate = clk_get_rate(clki->clk);
	}

	/* If frequency is smaller than 1MHz, set to 1MHz */
	if (core_clk_rate < DEFAULT_CLK_RATE_HZ)
		core_clk_rate = DEFAULT_CLK_RATE_HZ;

	core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
	if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
		ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (ufs_qcom_cap_qunipro(host))
		goto out;

	core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
	core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
	core_clk_period_in_ns &= MASK_CLK_NS_REG;

	switch (hs) {
	case FASTAUTO_MODE:
	case FAST_MODE:
		if (rate == PA_HS_MODE_A) {
			if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rA));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rA[gear-1][1];
		} else if (rate == PA_HS_MODE_B) {
			if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
				dev_err(hba->dev,
					"%s: index %d exceeds table size %zu\n",
					__func__, gear,
					ARRAY_SIZE(hs_fr_table_rB));
				goto out_error;
			}
			tx_clk_cycles_per_us = hs_fr_table_rB[gear-1][1];
		} else {
			dev_err(hba->dev, "%s: invalid rate = %d\n",
				__func__, rate);
			goto out_error;
		}
		break;
	case SLOWAUTO_MODE:
	case SLOW_MODE:
		if (gear > ARRAY_SIZE(pwm_fr_table)) {
			dev_err(hba->dev,
				"%s: index %d exceeds table size %zu\n",
				__func__, gear,
				ARRAY_SIZE(pwm_fr_table));
			goto out_error;
		}
		tx_clk_cycles_per_us = pwm_fr_table[gear-1][1];
		break;
	case UNCHANGED:
	default:
		dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
		goto out_error;
	}

	if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
	    (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
		/* this register 2 fields shall be written at once */
		ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
			      REG_UFS_TX_SYMBOL_CLK_NS_US);
		/*
		 * make sure above write gets applied before we return from
		 * this function.
		 */
		mb();
	}

	if (update_link_startup_timer) {
		ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
			      REG_UFS_PA_LINK_STARTUP_TIMER);
		/*
		 * make sure that this configuration is applied before
		 * we return
		 */
		mb();
	}
	goto out;

out_error:
	ret = -EINVAL;
out:
	return ret;
}
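
/*
 * Worked example for ufs_qcom_cfg_timers() (illustrative numbers): with
 * core_clk running at 100 MHz, core_clk_cycles_per_us = 100000000 / 1000000
 * = 100 is written to REG_UFS_SYS1CLK_1US, while core_clk_period_in_ns =
 * 1000000000 / 100000000 = 10 ns is shifted by OFFSET_CLK_NS_REG and OR-ed
 * into REG_UFS_TX_SYMBOL_CLK_NS_US together with the gear-dependent
 * tx_clk_cycles_per_us from the tables above.
 */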
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	int err = 0;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	switch (status) {
	case PRE_CHANGE:
		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
					0, true)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			err = -EINVAL;
			goto out;
		}

		if (ufs_qcom_cap_qunipro(host))
			/*
			 * set unipro core clock cycles to 150 & clear clock
			 * divider
			 */
			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
									  150);

		/*
		 * Some UFS devices (and may be host) have issues if LCC is
		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
		 * before link startup which will make sure that both host
		 * and device TX LCC are disabled once link startup is
		 * completed.
		 */
		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
			err = ufshcd_dme_set(hba,
					UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
					0);

		break;
	case POST_CHANGE:
		ufs_qcom_link_startup_post_change(hba);
		break;
	default:
		break;
	}

out:
	return err;
}
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int ret = 0;

	if (ufs_qcom_is_link_off(hba)) {
		/*
		 * Disable the tx/rx lane symbol clocks before PHY is
		 * powered down as the PLL source should be disabled
		 * after downstream clocks are disabled.
		 */
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);

		/* Assert PHY soft reset */
		ufs_qcom_assert_reset(hba);
		goto out;
	}

	/*
	 * If UniPro link is not active, PHY ref_clk, main PHY analog power
	 * rail and low noise analog power rail for PLL can be switched off.
	 */
	if (!ufs_qcom_is_link_active(hba)) {
		ufs_qcom_disable_lane_clks(host);
		phy_power_off(phy);
	}

out:
	return ret;
}
static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	int err;

	err = phy_power_on(phy);
	if (err) {
		dev_err(hba->dev, "%s: failed enabling regs, err = %d\n",
			__func__, err);
		goto out;
	}

	err = ufs_qcom_enable_lane_clks(host);
	if (err)
		goto out;

	hba->is_sys_suspended = false;

out:
	return err;
}
struct ufs_qcom_dev_params {
	u32 pwm_rx_gear;	/* pwm rx gear to work in */
	u32 pwm_tx_gear;	/* pwm tx gear to work in */
	u32 hs_rx_gear;		/* hs rx gear to work in */
	u32 hs_tx_gear;		/* hs tx gear to work in */
	u32 rx_lanes;		/* number of rx lanes */
	u32 tx_lanes;		/* number of tx lanes */
	u32 rx_pwr_pwm;		/* rx pwm working pwr */
	u32 tx_pwr_pwm;		/* tx pwm working pwr */
	u32 rx_pwr_hs;		/* rx hs working pwr */
	u32 tx_pwr_hs;		/* tx hs working pwr */
	u32 hs_rate;		/* rate A/B to work in HS */
	u32 desired_working_mode;
};
static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
				      struct ufs_pa_layer_attr *dev_max,
				      struct ufs_pa_layer_attr *agreed_pwr)
{
	int min_qcom_gear;
	int min_dev_gear;
	bool is_dev_sup_hs = false;
	bool is_qcom_max_hs = false;

	if (dev_max->pwr_rx == FAST_MODE)
		is_dev_sup_hs = true;

	if (qcom_param->desired_working_mode == FAST) {
		is_qcom_max_hs = true;
		min_qcom_gear = min_t(u32, qcom_param->hs_rx_gear,
				      qcom_param->hs_tx_gear);
	} else {
		min_qcom_gear = min_t(u32, qcom_param->pwm_rx_gear,
				      qcom_param->pwm_tx_gear);
	}

	/*
	 * device doesn't support HS but qcom_param->desired_working_mode is
	 * HS, thus device and qcom_param don't agree
	 */
	if (!is_dev_sup_hs && is_qcom_max_hs) {
		pr_err("%s: failed to agree on power mode (device doesn't support HS but requested power is HS)\n",
			__func__);
		return -ENOTSUPP;
	} else if (is_dev_sup_hs && is_qcom_max_hs) {
		/*
		 * since device supports HS, it supports FAST_MODE.
		 * since qcom_param->desired_working_mode is also HS
		 * then final decision (FAST/FASTAUTO) is done according
		 * to qcom_params as it is the restricting factor
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_hs;
	} else {
		/*
		 * here qcom_param->desired_working_mode is PWM.
		 * it doesn't matter whether device supports HS or PWM,
		 * in both cases qcom_param->desired_working_mode will
		 * determine the mode
		 */
		agreed_pwr->pwr_rx = agreed_pwr->pwr_tx =
						qcom_param->rx_pwr_pwm;
	}

	/*
	 * we would like tx to work in the minimum number of lanes
	 * between device capability and vendor preferences.
	 * the same decision will be made for rx
	 */
	agreed_pwr->lane_tx = min_t(u32, dev_max->lane_tx,
				    qcom_param->tx_lanes);
	agreed_pwr->lane_rx = min_t(u32, dev_max->lane_rx,
				    qcom_param->rx_lanes);

	/* device maximum gear is the minimum between device rx and tx gears */
	min_dev_gear = min_t(u32, dev_max->gear_rx, dev_max->gear_tx);

	/*
	 * if device capabilities and vendor pre-defined preferences are
	 * both HS or both PWM then set the minimum gear to be the chosen
	 * working gear.
	 * if one is PWM and one is HS then the one that is PWM gets to decide
	 * what the gear is, as it is the one that also decided previously what
	 * pwr the device will be configured to.
	 */
	if ((is_dev_sup_hs && is_qcom_max_hs) ||
	    (!is_dev_sup_hs && !is_qcom_max_hs))
		agreed_pwr->gear_rx = agreed_pwr->gear_tx =
			min_t(u32, min_dev_gear, min_qcom_gear);
	else if (!is_dev_sup_hs)
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_dev_gear;
	else
		agreed_pwr->gear_rx = agreed_pwr->gear_tx = min_qcom_gear;

	agreed_pwr->hs_rate = qcom_param->hs_rate;
	return 0;
}
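
/*
 * Illustrative outcome of the negotiation above: if the device reports
 * FAST_MODE with gear_rx/gear_tx = 3 and the vendor caps request HS with
 * hs_rx_gear/hs_tx_gear = 2, both sides are HS, so the agreed gear becomes
 * min(3, 2) = 2 and the agreed power mode is taken from rx_pwr_hs
 * (FAST or FASTAUTO).
 */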
#ifdef CONFIG_MSM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
		const char *speed_mode)
{
	struct device *dev = host->hba->dev;
	struct device_node *np = dev->of_node;
	int err;
	const char *key = "qcom,bus-vector-names";

	if (!speed_mode) {
		err = -EINVAL;
		goto out;
	}

	if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
		err = of_property_match_string(np, key, "MAX");
	else
		err = of_property_match_string(np, key, speed_mode);

out:
	if (err < 0)
		dev_err(dev, "%s: Invalid %s mode %d\n",
				__func__, speed_mode, err);
	return err;
}
static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
{
	int gear = max_t(u32, p->gear_rx, p->gear_tx);
	int lanes = max_t(u32, p->lane_rx, p->lane_tx);
	int pwr;

	/* default to PWM Gear 1, Lane 1 if power mode is not initialized */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (!p->pwr_rx && !p->pwr_tx) {
		pwr = SLOWAUTO_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
	} else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
		   p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
		pwr = FAST_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
			 p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
	} else {
		pwr = SLOW_MODE;
		snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
			 "PWM", gear, lanes);
	}
}
static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	int err = 0;

	if (vote != host->bus_vote.curr_vote) {
		err = msm_bus_scale_client_update_request(
				host->bus_vote.client_handle, vote);
		if (err) {
			dev_err(host->hba->dev,
				"%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
				__func__, host->bus_vote.client_handle,
				vote, err);
			goto out;
		}

		host->bus_vote.curr_vote = vote;
	}
out:
	return err;
}
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	int vote;
	int err = 0;
	char mode[BUS_VECTOR_NAME_LEN];

	ufs_qcom_get_speed_mode(&host->dev_req_params, mode);

	vote = ufs_qcom_get_bus_vote(host, mode);
	if (vote >= 0)
		err = ufs_qcom_set_bus_vote(host, vote);
	else
		err = vote;

	if (err)
		dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
	else
		host->bus_vote.saved_vote = vote;
	return err;
}
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	return snprintf(buf, PAGE_SIZE, "%u\n",
			host->bus_vote.is_max_bw_needed);
}

static ssize_t
store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	uint32_t value;

	if (!kstrtou32(buf, 0, &value)) {
		host->bus_vote.is_max_bw_needed = !!value;
		ufs_qcom_update_bus_bw_vote(host);
	}

	return count;
}
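
/*
 * Usage sketch for the attribute registered below (the sysfs path is
 * illustrative and depends on the platform device name):
 *
 *	echo 1 > /sys/devices/platform/.../max_bus_bw
 *
 * forces the "MAX" bus vector whenever the requested mode is not "MIN";
 * writing 0 restores speed-mode based voting.
 */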
static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	int err;
	struct msm_bus_scale_pdata *bus_pdata;
	struct device *dev = host->hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct device_node *np = dev->of_node;

	bus_pdata = msm_bus_cl_get_pdata(pdev);
	if (!bus_pdata) {
		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
		err = -ENODATA;
		goto out;
	}

	err = of_property_count_strings(np, "qcom,bus-vector-names");
	if (err < 0 || err != bus_pdata->num_usecases) {
		dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
				__func__, err);
		goto out;
	}

	host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
	if (!host->bus_vote.client_handle) {
		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
				__func__);
		err = -EFAULT;
		goto out;
	}

	/* cache the vote index for minimum and maximum bandwidth */
	host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
	host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");

	host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
	host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
	sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
	host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
	host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
	err = device_create_file(dev, &host->bus_vote.max_bus_bw);
out:
	return err;
}
#else /* CONFIG_MSM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
	return 0;
}

static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
	return 0;
}

static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
	return 0;
}
#endif /* CONFIG_MSM_BUS_SCALING */
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
	if (host->dev_ref_clk_ctrl_mmio &&
	    (enable ^ host->is_dev_ref_clk_enabled)) {
		u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

		if (enable)
			temp |= host->dev_ref_clk_en_mask;
		else
			temp &= ~host->dev_ref_clk_en_mask;

		/*
		 * If we are here to disable this clock it might be immediately
		 * after entering into hibern8 in which case we need to make
		 * sure that device ref_clk is active at least 1us after the
		 * hibern8 enter.
		 */
		if (!enable)
			udelay(1);

		writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);

		/* ensure that ref_clk is enabled/disabled before we return */
		wmb();

		/*
		 * If we call hibern8 exit after this, we need to make sure that
		 * device ref_clk is stable for at least 1us before the hibern8
		 * exit command.
		 */
		if (enable)
			udelay(1);

		host->is_dev_ref_clk_enabled = enable;
	}
}
static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
				      enum ufs_notify_change_status status,
				      struct ufs_pa_layer_attr *dev_max_params,
				      struct ufs_pa_layer_attr *dev_req_params)
{
	u32 val;
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct phy *phy = host->generic_phy;
	struct ufs_qcom_dev_params ufs_qcom_cap;
	int ret = 0;
	int res = 0;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	switch (status) {
	case PRE_CHANGE:
		ufs_qcom_cap.tx_lanes = UFS_QCOM_LIMIT_NUM_LANES_TX;
		ufs_qcom_cap.rx_lanes = UFS_QCOM_LIMIT_NUM_LANES_RX;
		ufs_qcom_cap.hs_rx_gear = UFS_QCOM_LIMIT_HSGEAR_RX;
		ufs_qcom_cap.hs_tx_gear = UFS_QCOM_LIMIT_HSGEAR_TX;
		ufs_qcom_cap.pwm_rx_gear = UFS_QCOM_LIMIT_PWMGEAR_RX;
		ufs_qcom_cap.pwm_tx_gear = UFS_QCOM_LIMIT_PWMGEAR_TX;
		ufs_qcom_cap.rx_pwr_pwm = UFS_QCOM_LIMIT_RX_PWR_PWM;
		ufs_qcom_cap.tx_pwr_pwm = UFS_QCOM_LIMIT_TX_PWR_PWM;
		ufs_qcom_cap.rx_pwr_hs = UFS_QCOM_LIMIT_RX_PWR_HS;
		ufs_qcom_cap.tx_pwr_hs = UFS_QCOM_LIMIT_TX_PWR_HS;
		ufs_qcom_cap.hs_rate = UFS_QCOM_LIMIT_HS_RATE;
		ufs_qcom_cap.desired_working_mode =
					UFS_QCOM_LIMIT_DESIRED_MODE;

		if (host->hw_ver.major == 0x1) {
			/*
			 * HS-G3 operations may not reliably work on legacy QCOM
			 * UFS host controller hardware even though capability
			 * exchange during link startup phase may end up
			 * negotiating maximum supported gear as G3.
			 * Hence downgrade the maximum supported gear to HS-G2.
			 */
			if (ufs_qcom_cap.hs_tx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_tx_gear = UFS_HS_G2;
			if (ufs_qcom_cap.hs_rx_gear > UFS_HS_G2)
				ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
		}

		ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
						 dev_max_params,
						 dev_req_params);
		if (ret) {
			pr_err("%s: failed to determine capabilities\n",
					__func__);
			goto out;
		}

		/* enable the device ref clock before changing to HS mode */
		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
			ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		break;
	case POST_CHANGE:
		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
					dev_req_params->pwr_rx,
					dev_req_params->hs_rate, false)) {
			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
				__func__);
			/*
			 * we return error code at the end of the routine,
			 * but continue to configure UFS_PHY_TX_LANE_ENABLE
			 * and bus voting as usual
			 */
			ret = -EINVAL;
		}

		val = ~(MAX_U32 << dev_req_params->lane_tx);
		res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
		if (res) {
			dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
				__func__, res);
			ret = res;
		}

		/* cache the power mode parameters to use internally */
		memcpy(&host->dev_req_params,
				dev_req_params, sizeof(*dev_req_params));
		ufs_qcom_update_bus_bw_vote(host);

		/* disable the device ref clock if entered PWM mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info) &&
			!ufshcd_is_hs_mode(dev_req_params))
			ufs_qcom_dev_ref_clk_ctrl(host, false);
		break;
	default:
		ret = -EINVAL;
		break;
	}
out:
	return ret;
}
static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
{
	int err;
	u32 pa_vs_config_reg1;

	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     &pa_vs_config_reg1);
	if (err)
		goto out;

	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
			     (pa_vs_config_reg1 | (1 << 12)));

out:
	return err;
}
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);

	return err;
}
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x1)
		return UFSHCI_VERSION_11;
	else
		return UFSHCI_VERSION_20;
}
/**
 * ufs_qcom_advertise_quirks - advertise the known QCOM UFS controller quirks
 * @hba: host controller instance
 *
 * The QCOM UFS host controller may exhibit some non-standard behaviours
 * (quirks) beyond what the UFSHCI specification defines. Advertise all such
 * quirks to the standard UFS host controller driver so that it takes them
 * into account.
 */
static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (host->hw_ver.major == 0x01) {
		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;

		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;

		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
	}

	if (host->hw_ver.major == 0x2) {
		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;

		if (!ufs_qcom_cap_qunipro(host))
			/* Legacy UniPro mode still needs the following quirks */
			hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
	}
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
	hba->caps |= UFSHCD_CAP_CLK_SCALING;
	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;

	if (host->hw_ver.major >= 0x2) {
		host->caps = UFS_QCOM_CAP_QUNIPRO |
			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
	}
}
/**
 * ufs_qcom_setup_clocks - enables/disables clocks
 * @hba: host controller instance
 * @on: If true, enable clocks else disable them.
 * @status: PRE_CHANGE or POST_CHANGE notify
 *
 * Returns 0 on success, non-zero on failure.
 */
static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
				 enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	int vote = 0;

	/*
	 * In case ufs_qcom_init() is not yet done, simply ignore.
	 * This ufs_qcom_setup_clocks() shall be called from
	 * ufs_qcom_init() after init is done.
	 */
	if (!host)
		return 0;

	if (on && (status == POST_CHANGE)) {
		phy_power_on(host->generic_phy);

		/* enable the device ref clock for HS mode */
		if (ufshcd_is_hs_mode(&hba->pwr_info))
			ufs_qcom_dev_ref_clk_ctrl(host, true);
		vote = host->bus_vote.saved_vote;
		if (vote == host->bus_vote.min_bw_vote)
			ufs_qcom_update_bus_bw_vote(host);

	} else if (!on && (status == PRE_CHANGE)) {
		if (!ufs_qcom_is_link_active(hba)) {
			/* disable device ref_clk */
			ufs_qcom_dev_ref_clk_ctrl(host, false);

			/* powering off PHY during aggressive clk gating */
			phy_power_off(host->generic_phy);
		}

		vote = host->bus_vote.min_bw_vote;
	}

	err = ufs_qcom_set_bus_vote(host, vote);
	if (err)
		dev_err(hba->dev, "%s: set bus vote failed %d\n",
				__func__, err);

	return err;
}
#define	ANDROID_BOOT_DEV_MAX	30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];

#ifndef MODULE
static int __init get_android_boot_dev(char *str)
{
	strlcpy(android_boot_dev, str, ANDROID_BOOT_DEV_MAX);
	return 1;
}
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif
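
/*
 * Example kernel command line making use of the __setup() hook above
 * (the device name is illustrative and platform dependent):
 *
 *	androidboot.bootdevice=1d84000.ufshc
 *
 * ufs_qcom_init() below returns -ENODEV for any controller whose
 * dev_name() does not match the recorded boot device.
 */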
/**
 * ufs_qcom_init - bind phy with controller
 * @hba: host controller instance
 *
 * Binds PHY with controller and powers up PHY enabling clocks
 * and regulators.
 *
 * Returns -EPROBE_DEFER if binding fails, returns negative error
 * on phy power up failure and returns zero on success.
 */
static int ufs_qcom_init(struct ufs_hba *hba)
{
	int err;
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct ufs_qcom_host *host;
	struct resource *res;

	if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
		return -ENODEV;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host) {
		err = -ENOMEM;
		dev_err(dev, "%s: no memory for qcom ufs host\n", __func__);
		goto out;
	}

	/* Make a two way bind between the qcom host and the hba */
	host->hba = hba;
	ufshcd_set_variant(hba, host);

	/*
	 * voting/devoting device ref_clk source is time consuming hence
	 * skip devoting it during aggressive clock gating. This clock
	 * will still be gated off during runtime suspend.
	 */
	host->generic_phy = devm_phy_get(dev, "ufsphy");

	if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
		/*
		 * UFS driver might be probed before the phy driver does.
		 * In that case we would like to return EPROBE_DEFER code.
		 */
		err = -EPROBE_DEFER;
		dev_warn(dev, "%s: required phy device hasn't probed yet, err = %d\n",
			__func__, err);
		goto out_variant_clear;
	} else if (IS_ERR(host->generic_phy)) {
		err = PTR_ERR(host->generic_phy);
		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
		goto out_variant_clear;
	}

	err = ufs_qcom_bus_register(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
		&host->hw_ver.minor, &host->hw_ver.step);

	/*
	 * for newer controllers, device reference clock control bit has
	 * moved inside UFS controller register address space itself.
	 */
	if (host->hw_ver.major >= 0x02) {
		host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
		host->dev_ref_clk_en_mask = BIT(26);
	} else {
		/* "dev_ref_clk_ctrl_mem" is optional resource */
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (res) {
			host->dev_ref_clk_ctrl_mmio =
					devm_ioremap_resource(dev, res);
			if (IS_ERR(host->dev_ref_clk_ctrl_mmio)) {
				dev_warn(dev,
					"%s: could not map dev_ref_clk_ctrl_mmio, err %ld\n",
					__func__,
					PTR_ERR(host->dev_ref_clk_ctrl_mmio));
				host->dev_ref_clk_ctrl_mmio = NULL;
			}
			host->dev_ref_clk_en_mask = BIT(5);
		}
	}

	/* update phy revision information before calling phy_init() */
	ufs_qcom_phy_save_controller_version(host->generic_phy,
		host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);

	err = ufs_qcom_init_lane_clks(host);
	if (err)
		goto out_variant_clear;

	ufs_qcom_set_caps(hba);
	ufs_qcom_advertise_quirks(hba);

	ufs_qcom_setup_clocks(hba, true, POST_CHANGE);

	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
		ufs_qcom_hosts[hba->dev->id] = host;

	host->dbg_print_en |= UFS_QCOM_DEFAULT_DBG_PRINT_EN;
	ufs_qcom_get_default_testbus_cfg(host);
	err = ufs_qcom_testbus_config(host);
	if (err) {
		dev_warn(dev, "%s: failed to configure the testbus %d\n",
				__func__, err);
		err = 0;
	}

	goto out;

out_variant_clear:
	ufshcd_set_variant(hba, NULL);
out:
	return err;
}
static void ufs_qcom_exit(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	ufs_qcom_disable_lane_clks(host);
	phy_power_off(host->generic_phy);
	phy_exit(host->generic_phy);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
						       u32 clk_cycles)
{
	int err;
	u32 core_clk_ctrl_reg;

	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
		return -EINVAL;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);
	if (err)
		goto out;

	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
	core_clk_ctrl_reg |= clk_cycles;

	/* Clear CORE_CLK_DIV_EN */
	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

	err = ufshcd_dme_set(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    core_clk_ctrl_reg);
out:
	return err;
}
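
/*
 * Clock scaling hooks. The helper above programs the "core clock cycles per
 * 1us" field of the vendor-specific DME_VS_CORE_CLK_CTRL attribute; the
 * post-change callbacks below use 150 cycles when scaling up and 75 when
 * scaling down, which presumably corresponds to a 150 MHz vs. 75 MHz UniPro
 * core clock.
 */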
static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
	/* nothing to do as of now */
	return 0;
}

static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 150 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
}

static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	int err;
	u32 core_clk_ctrl_reg;

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	err = ufshcd_dme_get(hba,
			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
			    &core_clk_ctrl_reg);

	/* make sure CORE_CLK_DIV_EN is cleared */
	if (!err &&
	    (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
		core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
		err = ufshcd_dme_set(hba,
				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
				    core_clk_ctrl_reg);
	}

	return err;
}

static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);

	if (!ufs_qcom_cap_qunipro(host))
		return 0;

	/* set unipro core clock cycles to 75 and clear clock divider */
	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
}

static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
		bool scale_up, enum ufs_notify_change_status status)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
	int err = 0;

	if (status == PRE_CHANGE) {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_pre_change(hba);
		else
			err = ufs_qcom_clk_scale_down_pre_change(hba);
	} else {
		if (scale_up)
			err = ufs_qcom_clk_scale_up_post_change(hba);
		else
			err = ufs_qcom_clk_scale_down_post_change(hba);

		if (err || !dev_req_params)
			goto out;

		ufs_qcom_cfg_timers(hba,
				    dev_req_params->gear_rx,
				    dev_req_params->pwr_rx,
				    dev_req_params->hs_rate,
				    false);
		ufs_qcom_update_bus_bw_vote(host);
	}

out:
	return err;
}
static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
		void *priv, void (*print_fn)(struct ufs_hba *hba,
		int offset, int num_regs, const char *str, void *priv))
{
	u32 reg;
	struct ufs_qcom_host *host;

	if (unlikely(!hba)) {
		pr_err("%s: hba is NULL\n", __func__);
		return;
	}
	if (unlikely(!print_fn)) {
		dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
		return;
	}

	host = ufshcd_get_variant(hba);
	if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
		return;

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
	print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);

	reg = ufshcd_readl(hba, REG_UFS_CFG1);
	reg |= UTP_DBG_RAMS_EN;
	ufshcd_writel(hba, reg, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
	print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
	print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);

	/* clear bit 17 - UTP_DBG_RAMS_EN */
	ufshcd_rmwl(hba, UTP_DBG_RAMS_EN, 0, REG_UFS_CFG1);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
	print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
	print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
	print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
	print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);

	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
	print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
}
static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
{
	if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
			    UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
	} else {
		ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
		ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
	}
}
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
	/* provide a legal default configuration */
	host->testbus.select_major = TSTBUS_UNIPRO;
	host->testbus.select_minor = 37;
}
static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
{
	if (host->testbus.select_major >= TSTBUS_MAX) {
		dev_err(host->hba->dev,
			"%s: UFS_CFG1[TEST_BUS_SEL] may not equal 0x%05X\n",
			__func__, host->testbus.select_major);
		return false;
	}

	return true;
}
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
	int reg;
	int offset;
	u32 mask = TEST_BUS_SUB_SEL_MASK;

	if (!host)
		return -EINVAL;

	if (!ufs_qcom_testbus_cfg_is_ok(host))
		return -EPERM;

	switch (host->testbus.select_major) {
	case TSTBUS_UAWM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 24;
		break;
	case TSTBUS_UARM:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 16;
		break;
	case TSTBUS_TXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 8;
		break;
	case TSTBUS_RXUC:
		reg = UFS_TEST_BUS_CTRL_0;
		offset = 0;
		break;
	case TSTBUS_DFC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 24;
		break;
	case TSTBUS_TRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 16;
		break;
	case TSTBUS_TMRLUT:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 8;
		break;
	case TSTBUS_OCSC:
		reg = UFS_TEST_BUS_CTRL_1;
		offset = 0;
		break;
	case TSTBUS_WRAPPER:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 16;
		break;
	case TSTBUS_COMBINED:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 8;
		break;
	case TSTBUS_UTP_HCI:
		reg = UFS_TEST_BUS_CTRL_2;
		offset = 0;
		break;
	case TSTBUS_UNIPRO:
		reg = UFS_UNIPRO_CFG;
		offset = 20;
		mask = 0xFFF;
		break;
	/*
	 * No need for a default case, since
	 * ufs_qcom_testbus_cfg_is_ok() checks that the configuration
	 * is legal
	 */
	}
	mask <<= offset;

	pm_runtime_get_sync(host->hba->dev);
	ufshcd_hold(host->hba, false);
	ufshcd_rmwl(host->hba, TEST_BUS_SEL,
		    (u32)host->testbus.select_major << 19,
		    REG_UFS_CFG1);
	ufshcd_rmwl(host->hba, mask,
		    (u32)host->testbus.select_minor << offset,
		    reg);
	ufs_qcom_enable_test_bus(host);
	/*
	 * Make sure the test bus configuration is
	 * committed before returning.
	 */
	mb();
	ufshcd_release(host->hba);
	pm_runtime_put_sync(host->hba->dev);

	return 0;
}
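
/*
 * Example: with select_major = TSTBUS_DFC and select_minor = 5, the code
 * above writes TSTBUS_DFC into the TEST_BUS_SEL field of REG_UFS_CFG1
 * (bits starting at 19) and programs 5 into the sub-selector field at bit
 * offset 24 of UFS_TEST_BUS_CTRL_1; the resulting signals can then be
 * sampled from UFS_TEST_BUS via ufs_qcom_testbus_read() below.
 */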
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
}
static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
{
	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
	u32 *testbus = NULL;
	int i, nminor = 256, testbus_len = nminor * sizeof(u32);

	testbus = kmalloc(testbus_len, GFP_KERNEL);
	if (!testbus)
		return;

	host->testbus.select_major = TSTBUS_UNIPRO;
	for (i = 0; i < nminor; i++) {
		host->testbus.select_minor = i;
		ufs_qcom_testbus_config(host);
		testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
	}
	print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
			16, 4, testbus, testbus_len, false);
	kfree(testbus);
}
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
			 "HCI Vendor Specific Registers ");

	/* sleep a bit intermittently as we are dumping too much data */
	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
	usleep_range(1000, 1100);
	ufs_qcom_testbus_read(hba);
	usleep_range(1000, 1100);
	ufs_qcom_print_unipro_testbus(hba);
	usleep_range(1000, 1100);
}
/**
 * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
 *
 * The variant operations configure the necessary controller and PHY
 * handshake during initialization.
 */
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
	.name			= "qcom",
	.init			= ufs_qcom_init,
	.exit			= ufs_qcom_exit,
	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
	.clk_scale_notify	= ufs_qcom_clk_scale_notify,
	.setup_clocks		= ufs_qcom_setup_clocks,
	.hce_enable_notify	= ufs_qcom_hce_enable_notify,
	.link_startup_notify	= ufs_qcom_link_startup_notify,
	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
	.suspend		= ufs_qcom_suspend,
	.resume			= ufs_qcom_resume,
	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
};
/**
 * ufs_qcom_probe - probe routine of the driver
 * @pdev: pointer to Platform device handle
 *
 * Return zero for success and non-zero for failure
 */
static int ufs_qcom_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	/* Perform generic probe */
	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}
/**
 * ufs_qcom_remove - set driver_data of the device to NULL
 * @pdev: pointer to platform device handle
 *
 * Always returns 0
 */
static int ufs_qcom_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);
	return 0;
}
static const struct of_device_id ufs_qcom_of_match[] = {
	{ .compatible = "qcom,ufshc"},
	{},
};
MODULE_DEVICE_TABLE(of, ufs_qcom_of_match);
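
/*
 * The driver binds to device tree nodes carrying the "qcom,ufshc"
 * compatible string; clocks, regulators and other common resources are
 * parsed from the node by the generic ufshcd_pltfrm_init() path invoked
 * from ufs_qcom_probe() above.
 */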
static const struct dev_pm_ops ufs_qcom_pm_ops = {
	.suspend	= ufshcd_pltfrm_suspend,
	.resume		= ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume	= ufshcd_pltfrm_runtime_resume,
	.runtime_idle	= ufshcd_pltfrm_runtime_idle,
};
static struct platform_driver ufs_qcom_pltform = {
	.probe	= ufs_qcom_probe,
	.remove	= ufs_qcom_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	= {
		.name	= "ufshcd-qcom",
		.pm	= &ufs_qcom_pm_ops,
		.of_match_table = of_match_ptr(ufs_qcom_of_match),
	},
};
module_platform_driver(ufs_qcom_pltform);
MODULE_LICENSE("GPL v2");