// SPDX-License-Identifier: GPL-2.0-only
/*
 * UFS Host Controller driver for Exynos specific extensions
 *
 * Copyright (C) 2014-2015 Samsung Electronics Co., Ltd.
 * Author: Seungwon Jeon  <essuuj@gmail.com>
 * Author: Alim Akhtar <alim.akhtar@samsung.com>
 *
 */
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "ufshci.h"
#include "unipro.h"

#include "ufs-exynos.h"
/*
 * Exynos's Vendor specific registers for UFSHCI
 */
#define HCI_TXPRDT_ENTRY_SIZE	0x00
#define PRDT_PREFECT_EN		BIT(31)
#define PRDT_SET_SIZE(x)	((x) & 0x1F)
#define HCI_RXPRDT_ENTRY_SIZE	0x04
#define HCI_1US_TO_CNT_VAL	0x0C
#define CNT_VAL_1US_MASK	0x3FF
#define HCI_UTRL_NEXUS_TYPE	0x40
#define HCI_UTMRL_NEXUS_TYPE	0x44
#define HCI_SW_RST		0x50
#define UFS_LINK_SW_RST		BIT(0)
#define UFS_UNIPRO_SW_RST	BIT(1)
#define UFS_SW_RST_MASK		(UFS_UNIPRO_SW_RST | UFS_LINK_SW_RST)
#define HCI_DATA_REORDER	0x60
#define HCI_UNIPRO_APB_CLK_CTRL	0x68
#define UNIPRO_APB_CLK(v, x)	(((v) & ~0xF) | ((x) & 0xF))
#define HCI_AXIDMA_RWDATA_BURST_LEN	0x6C
#define HCI_GPIO_OUT		0x70
#define HCI_ERR_EN_PA_LAYER	0x78
#define HCI_ERR_EN_DL_LAYER	0x7C
#define HCI_ERR_EN_N_LAYER	0x80
#define HCI_ERR_EN_T_LAYER	0x84
#define HCI_ERR_EN_DME_LAYER	0x88
#define HCI_CLKSTOP_CTRL	0xB0
#define REFCLK_STOP		BIT(2)
#define UNIPRO_MCLK_STOP	BIT(1)
#define UNIPRO_PCLK_STOP	BIT(0)
#define CLK_STOP_MASK		(REFCLK_STOP |\
				 UNIPRO_MCLK_STOP |\
				 UNIPRO_PCLK_STOP)
#define HCI_MISC		0xB4
#define REFCLK_CTRL_EN		BIT(7)
#define UNIPRO_PCLK_CTRL_EN	BIT(6)
#define UNIPRO_MCLK_CTRL_EN	BIT(5)
#define HCI_CORECLK_CTRL_EN	BIT(4)
#define CLK_CTRL_EN_MASK	(REFCLK_CTRL_EN |\
				 UNIPRO_PCLK_CTRL_EN |\
				 UNIPRO_MCLK_CTRL_EN)
/* Device fatal error */
#define DFES_ERR_EN		BIT(31)
#define DFES_DEF_L2_ERRS	(UIC_DATA_LINK_LAYER_ERROR_RX_BUF_OF |\
				 UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
#define DFES_DEF_L3_ERRS	(UIC_NETWORK_UNSUPPORTED_HEADER_TYPE |\
				 UIC_NETWORK_BAD_DEVICEID_ENC |\
				 UIC_NETWORK_LHDR_TRAP_PACKET_DROPPING)
#define DFES_DEF_L4_ERRS	(UIC_TRANSPORT_UNSUPPORTED_HEADER_TYPE |\
				 UIC_TRANSPORT_UNKNOWN_CPORTID |\
				 UIC_TRANSPORT_NO_CONNECTION_RX |\
				 UIC_TRANSPORT_BAD_TC)
enum {
	UNIPRO_L1_5 = 0,/* PHY Adapter */
	UNIPRO_L2,	/* Data Link */
	UNIPRO_L3,	/* Network */
	UNIPRO_L4,	/* Transport */
	UNIPRO_DME,	/* DME */
};
/*
 * UNIPRO registers
 */
#define UNIPRO_COMP_VERSION			0x000
#define UNIPRO_DME_PWR_REQ			0x090
#define UNIPRO_DME_PWR_REQ_POWERMODE		0x094
#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER0	0x098
#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER1	0x09C
#define UNIPRO_DME_PWR_REQ_LOCALL2TIMER2	0x0A0
#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER0	0x0A4
#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER1	0x0A8
#define UNIPRO_DME_PWR_REQ_REMOTEL2TIMER2	0x0AC
/*
 * UFS Protector registers
 */
#define UFSPRSECURITY	0x010
#define NSSMU		BIT(14)
#define UFSPSBEGIN0	0x200
#define UFSPSEND0	0x204
#define UFSPSLUN0	0x208
#define UFSPSCTRL0	0x20C
#define CNTR_DIV_VAL	40
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en);
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en);
static inline void exynos_ufs_enable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
	exynos_ufs_auto_ctrl_hcc(ufs, true);
}

static inline void exynos_ufs_disable_auto_ctrl_hcc(struct exynos_ufs *ufs)
{
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}

static inline void exynos_ufs_disable_auto_ctrl_hcc_save(
					struct exynos_ufs *ufs, u32 *val)
{
	*val = hci_readl(ufs, HCI_MISC);
	exynos_ufs_auto_ctrl_hcc(ufs, false);
}

static inline void exynos_ufs_auto_ctrl_hcc_restore(
					struct exynos_ufs *ufs, u32 *val)
{
	hci_writel(ufs, *val, HCI_MISC);
}

static inline void exynos_ufs_gate_clks(struct exynos_ufs *ufs)
{
	exynos_ufs_ctrl_clkstop(ufs, true);
}

static inline void exynos_ufs_ungate_clks(struct exynos_ufs *ufs)
{
	exynos_ufs_ctrl_clkstop(ufs, false);
}
static int exynos7_ufs_drv_init(struct device *dev, struct exynos_ufs *ufs)
{
	return 0;
}
static int exynos7_ufs_pre_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	u32 val = ufs->drv_data->uic_attr->pa_dbg_option_suite;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x297, i), 0x17);
	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x362, i), 0xff);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x363, i), 0x00);
	}
	exynos_ufs_disable_ov_tm(hba);

	for_each_ufs_tx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(TX_HIBERN8_CONTROL, i), 0x0);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_TXPHY_CFGUPDT), 0x1);
	udelay(1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val |
					(1 << 12));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_RESET_PHY), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_SKIP_LINE_RESET), 0x1);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_LINE_RESET_REQ), 0x1);
	udelay(1600);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE), val);

	return 0;
}
static int exynos7_ufs_post_link(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	int i;

	exynos_ufs_enable_ov_tm(hba);
	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x28b, i), 0x83);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x29a, i), 0x07);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(0x277, i),
			TX_LINERESET_N(exynos_ufs_calc_time_cntr(ufs, 200000)));
	}
	exynos_ufs_disable_ov_tm(hba);

	exynos_ufs_enable_dbg_mode(hba);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_SAVECONFIGTIME), 0xbb8);
	exynos_ufs_disable_dbg_mode(hba);

	return 0;
}
static int exynos7_ufs_pre_pwr_change(struct exynos_ufs *ufs,
						struct ufs_pa_layer_attr *pwr)
{
	unipro_writel(ufs, 0x22, UNIPRO_DBG_FORCE_DME_CTRL_STATE);

	return 0;
}
static int exynos7_ufs_post_pwr_change(struct exynos_ufs *ufs,
						struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	int lanes = max_t(u32, pwr->lane_rx, pwr->lane_tx);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_RXPHY_CFGUPDT), 0x1);

	if (lanes == 1) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), 0x1);
		exynos_ufs_disable_dbg_mode(hba);
	}

	return 0;
}
/*
 * exynos_ufs_auto_ctrl_hcc - HCI core clock control by h/w
 * Control should be disabled in the following cases:
 * - Before host controller S/W reset
 * - While accessing the UFS protector's registers
 */
static void exynos_ufs_auto_ctrl_hcc(struct exynos_ufs *ufs, bool en)
{
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en)
		hci_writel(ufs, misc | HCI_CORECLK_CTRL_EN, HCI_MISC);
	else
		hci_writel(ufs, misc & ~HCI_CORECLK_CTRL_EN, HCI_MISC);
}
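
/*
 * The two writes in each branch below are deliberately ordered: gating
 * enables the h/w clock-stop controls in HCI_MISC before setting the
 * stop bits in HCI_CLKSTOP_CTRL, while ungating clears the stop bits
 * first and releases the controls last, so the clocks are running again
 * before h/w control is handed back.
 */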
static void exynos_ufs_ctrl_clkstop(struct exynos_ufs *ufs, bool en)
{
	u32 ctrl = hci_readl(ufs, HCI_CLKSTOP_CTRL);
	u32 misc = hci_readl(ufs, HCI_MISC);

	if (en) {
		hci_writel(ufs, misc | CLK_CTRL_EN_MASK, HCI_MISC);
		hci_writel(ufs, ctrl | CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
	} else {
		hci_writel(ufs, ctrl & ~CLK_STOP_MASK, HCI_CLKSTOP_CTRL);
		hci_writel(ufs, misc & ~CLK_CTRL_EN_MASK, HCI_MISC);
	}
}
static int exynos_ufs_get_clk_info(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long pclk_rate;
	u32 f_min, f_max;
	u8 div = 0;
	int ret = 0;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR(clki->clk)) {
			if (!strcmp(clki->name, "core_clk"))
				ufs->clk_hci_core = clki->clk;
			else if (!strcmp(clki->name, "sclk_unipro_main"))
				ufs->clk_unipro_main = clki->clk;
		}
	}

	if (!ufs->clk_hci_core || !ufs->clk_unipro_main) {
		dev_err(hba->dev, "failed to get clk info\n");
		ret = -EINVAL;
		goto out;
	}

	ufs->mclk_rate = clk_get_rate(ufs->clk_unipro_main);
	pclk_rate = clk_get_rate(ufs->clk_hci_core);
	f_min = ufs->pclk_avail_min;
	f_max = ufs->pclk_avail_max;

	if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
		do {
			pclk_rate /= (div + 1);

			if (pclk_rate <= f_max)
				break;
			div++;
		} while (pclk_rate >= f_min);
	}

	if (unlikely(pclk_rate < f_min || pclk_rate > f_max)) {
		dev_err(hba->dev, "not available pclk range %lu\n", pclk_rate);
		ret = -EINVAL;
		goto out;
	}

	ufs->pclk_rate = pclk_rate;
	ufs->pclk_div = div;

out:
	return ret;
}
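
/*
 * The divider chosen above is later programmed into the low nibble of
 * HCI_UNIPRO_APB_CLK_CTRL via UNIPRO_APB_CLK(), which preserves the
 * upper bits of the register; illustratively, UNIPRO_APB_CLK(val, 1)
 * replaces the bottom four bits of val with 0x1.
 */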
static void exynos_ufs_set_unipro_pclk_div(struct exynos_ufs *ufs)
{
	if (ufs->opts & EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL) {
		struct ufs_hba *hba = ufs->hba;
		u32 val;

		val = hci_readl(ufs, HCI_UNIPRO_APB_CLK_CTRL);
		hci_writel(ufs, UNIPRO_APB_CLK(val, ufs->pclk_div),
			   HCI_UNIPRO_APB_CLK_CTRL);
	}
}
static void exynos_ufs_set_pwm_clk_div(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	ufshcd_dme_set(hba,
		UIC_ARG_MIB(CMN_PWM_CLK_CTRL), attr->cmn_pwm_clk_ctrl);
}
static void exynos_ufs_calc_pwm_clk_div(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	const unsigned int div = 30, mult = 20;
	const unsigned long pwm_min = 3 * 1000 * 1000;
	const unsigned long pwm_max = 9 * 1000 * 1000;
	const int divs[] = {32, 16, 8, 4};
	unsigned long clk = 0, _clk, clk_period;
	int i = 0, clk_idx = -1;

	clk_period = UNIPRO_PCLK_PERIOD(ufs);
	for (i = 0; i < ARRAY_SIZE(divs); i++) {
		_clk = NSEC_PER_SEC * mult / (clk_period * divs[i] * div);
		if (_clk >= pwm_min && _clk <= pwm_max) {
			if (_clk > clk) {
				clk_idx = i;
				clk = _clk;
			}
		}
	}

	if (clk_idx == -1) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(CMN_PWM_CLK_CTRL), &clk_idx);
		dev_err(hba->dev,
			"failed to decide pwm clock divider, will not change\n");
	}

	attr->cmn_pwm_clk_ctrl = clk_idx & PWM_CLK_CTRL_MASK;
}
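
/*
 * Worked example (illustrative): with a 100 MHz PCLK, clk_period is
 * 10 ns and fraction is 0, so a 200000 ns period converts to
 * (200000 * 10) / (10 * 10 + 0) = 20000 ticks. The fixed-point factor
 * "precise" keeps the fractional part of NSEC_PER_SEC / pclk_rate from
 * being lost to integer division.
 */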
long exynos_ufs_calc_time_cntr(struct exynos_ufs *ufs, long period)
{
	const int precise = 10;
	long pclk_rate = ufs->pclk_rate;
	long clk_period, fraction;

	clk_period = UNIPRO_PCLK_PERIOD(ufs);
	fraction = ((NSEC_PER_SEC % pclk_rate) * precise) / pclk_rate;

	return (period * precise) / ((clk_period * precise) + fraction);
}
static void exynos_ufs_specify_phy_time_attr(struct exynos_ufs *ufs)
{
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;

	t_cfg->tx_linereset_p =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_p_nsec);
	t_cfg->tx_linereset_n =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_dif_n_nsec);
	t_cfg->tx_high_z_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_high_z_cnt_nsec);
	t_cfg->tx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_base_unit_nsec);
	t_cfg->tx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_gran_unit_nsec);
	t_cfg->tx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->tx_sleep_cnt);

	t_cfg->rx_linereset =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_dif_p_nsec);
	t_cfg->rx_hibern8_wait =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_hibern8_wait_nsec);
	t_cfg->rx_base_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_base_unit_nsec);
	t_cfg->rx_gran_n_val =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_gran_unit_nsec);
	t_cfg->rx_sleep_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_sleep_cnt);
	t_cfg->rx_stall_cnt =
		exynos_ufs_calc_time_cntr(ufs, attr->rx_stall_cnt);
}
static void exynos_ufs_config_phy_time_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct ufs_phy_time_cfg *t_cfg = &ufs->t_cfg;
	int i;

	exynos_ufs_set_pwm_clk_div(ufs);

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_FILLER_ENABLE, i),
				ufs->drv_data->uic_attr->rx_filler_enable);
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_LINERESET_VAL, i),
				RX_LINERESET(t_cfg->rx_linereset));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_07_00, i),
				RX_BASE_NVAL_L(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_BASE_NVAL_15_08, i),
				RX_BASE_NVAL_H(t_cfg->rx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_07_00, i),
				RX_GRAN_NVAL_L(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_GRAN_NVAL_10_08, i),
				RX_GRAN_NVAL_H(t_cfg->rx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_SLEEP_CNT_TIMER, i),
				RX_OV_SLEEP_CNT(t_cfg->rx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_OV_STALL_CNT_TIMER, i),
				RX_OV_STALL_CNT(t_cfg->rx_stall_cnt));
	}

	for_each_ufs_tx_lane(ufs, i) {
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_LINERESET_P_VAL, i),
				TX_LINERESET_P(t_cfg->tx_linereset_p));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_07_00, i),
				TX_HIGH_Z_CNT_L(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HIGH_Z_CNT_11_08, i),
				TX_HIGH_Z_CNT_H(t_cfg->tx_high_z_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_07_00, i),
				TX_BASE_NVAL_L(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_BASE_NVAL_15_08, i),
				TX_BASE_NVAL_H(t_cfg->tx_base_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_07_00, i),
				TX_GRAN_NVAL_L(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_GRAN_NVAL_10_08, i),
				TX_GRAN_NVAL_H(t_cfg->tx_gran_n_val));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_OV_SLEEP_CNT_TIMER, i),
				TX_OV_SLEEP_CNT(t_cfg->tx_sleep_cnt));
		ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_MIN_ACTIVATETIME, i),
				ufs->drv_data->uic_attr->tx_min_activatetime);
	}

	exynos_ufs_disable_ov_tm(hba);
}
static void exynos_ufs_config_phy_cap_attr(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
	int i;

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i) {
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_HS_G1_SYNC_LENGTH_CAP, i),
			attr->rx_hs_g1_sync_len_cap);
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_HS_G2_SYNC_LENGTH_CAP, i),
			attr->rx_hs_g2_sync_len_cap);
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_HS_G3_SYNC_LENGTH_CAP, i),
			attr->rx_hs_g3_sync_len_cap);
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_HS_G1_PREP_LENGTH_CAP, i),
			attr->rx_hs_g1_prep_sync_len_cap);
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_HS_G2_PREP_LENGTH_CAP, i),
			attr->rx_hs_g2_prep_sync_len_cap);
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_HS_G3_PREP_LENGTH_CAP, i),
			attr->rx_hs_g3_prep_sync_len_cap);
	}

	if (attr->rx_adv_fine_gran_sup_en == 0) {
		for_each_ufs_rx_lane(ufs, i) {
			ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP, i), 0);

			if (attr->rx_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_MIN_ACTIVATETIME_CAP,
						i), attr->rx_min_actv_time_cap);

			if (attr->rx_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAP, i),
						attr->rx_hibern8_time_cap);
		}
	} else if (attr->rx_adv_fine_gran_sup_en == 1) {
		for_each_ufs_rx_lane(ufs, i) {
			if (attr->rx_adv_fine_gran_step)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_GRANULARITY_CAP,
						i), RX_ADV_FINE_GRAN_STEP(
						attr->rx_adv_fine_gran_step));

			if (attr->rx_adv_min_actv_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(
						RX_ADV_MIN_ACTIVATETIME_CAP, i),
						attr->rx_adv_min_actv_time_cap);

			if (attr->rx_adv_hibern8_time_cap)
				ufshcd_dme_set(hba,
					UIC_ARG_MIB_SEL(RX_ADV_HIBERN8TIME_CAP,
						i),
						attr->rx_adv_hibern8_time_cap);
		}
	}

	exynos_ufs_disable_ov_tm(hba);
}
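
/*
 * CPort bring-up sequence: drop the connection state to CPORT_IDLE so
 * the T_* attributes become writable, program the local and peer
 * device/CPort identifiers, then switch the state to CPORT_CONNECTED.
 */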
static void exynos_ufs_establish_connt(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	enum {
		DEV_ID		= 0x00,
		PEER_DEV_ID	= 0x01,
		PEER_CPORT_ID	= 0x00,
		TRAFFIC_CLASS	= 0x00,
	};

	/* allow cport attributes to be set */
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_IDLE);

	/* local unipro attributes */
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID), DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(N_DEVICEID_VALID), TRUE);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), PEER_DEV_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), PEER_CPORT_ID);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), CPORT_DEF_FLAGS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), TRAFFIC_CLASS);
	ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), CPORT_CONNECTED);
}
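
/*
 * A single protection window (descriptor 0) is programmed to span the
 * whole address range (UFSPSBEGIN0 = 0x0, UFSPSEND0 = 0xffffffff) and,
 * it appears, to match any LUN (UFSPSLUN0 = 0xff), with NSSMU set so
 * that encryption stays disabled by default.
 */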
static void exynos_ufs_config_smu(struct exynos_ufs *ufs)
{
	u32 reg, val;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	/* leave encryption disabled by default */
	reg = ufsp_readl(ufs, UFSPRSECURITY);
	ufsp_writel(ufs, reg | NSSMU, UFSPRSECURITY);
	ufsp_writel(ufs, 0x0, UFSPSBEGIN0);
	ufsp_writel(ufs, 0xffffffff, UFSPSEND0);
	ufsp_writel(ufs, 0xff, UFSPSLUN0);
	ufsp_writel(ufs, 0xf1, UFSPSCTRL0);

	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
}
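
/*
 * Worked example (illustrative): at gear 1 the sync length is 80 us;
 * with a 10 ns PCLK period, exynos_ufs_calc_time_cntr() yields 8000
 * ticks, and the mask programmed below keeps bits [15:8] of that
 * count, i.e. 8000 >> 8 = 31 (0x1f).
 */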
static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
					struct ufs_pa_layer_attr *pwr)
{
	struct ufs_hba *hba = ufs->hba;
	u8 g = max_t(u32, pwr->gear_rx, pwr->gear_tx);
	u32 mask, sync_len;
	enum {
		SYNC_LEN_G1 = 80 * 1000, /* 80us */
		SYNC_LEN_G2 = 40 * 1000, /* 40us */
		SYNC_LEN_G3 = 20 * 1000, /* 20us */
	};
	int i;

	if (g == 1)
		sync_len = SYNC_LEN_G1;
	else if (g == 2)
		sync_len = SYNC_LEN_G2;
	else if (g == 3)
		sync_len = SYNC_LEN_G3;
	else
		return;

	mask = exynos_ufs_calc_time_cntr(ufs, sync_len);
	mask = (mask >> 8) & 0xff;

	exynos_ufs_enable_ov_tm(hba);

	for_each_ufs_rx_lane(ufs, i)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB_SEL(RX_SYNC_MASK_LENGTH, i), mask);

	exynos_ufs_disable_ov_tm(hba);
}
static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct ufs_dev_params ufs_exynos_cap;
	int ret;

	if (!dev_req_params) {
		pr_err("%s: incoming dev_req_params is NULL\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	ufshcd_init_pwr_dev_param(&ufs_exynos_cap);

	ret = ufshcd_get_pwr_dev_param(&ufs_exynos_cap,
				       dev_max_params, dev_req_params);
	if (ret) {
		pr_err("%s: failed to determine capabilities\n", __func__);
		goto out;
	}

	if (ufs->drv_data->pre_pwr_change)
		ufs->drv_data->pre_pwr_change(ufs, dev_req_params);

	if (ufshcd_is_hs_mode(dev_req_params)) {
		exynos_ufs_config_sync_pattern_mask(ufs, dev_req_params);

		switch (dev_req_params->hs_rate) {
		case PA_HS_MODE_A:
		case PA_HS_MODE_B:
			phy_calibrate(generic_phy);
			break;
		}
	}

	return 0;
out:
	return ret;
}
#define PWR_MODE_STR_LEN	64
static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
				struct ufs_pa_layer_attr *pwr_max,
				struct ufs_pa_layer_attr *pwr_req)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	int gear = max_t(u32, pwr_req->gear_rx, pwr_req->gear_tx);
	int lanes = max_t(u32, pwr_req->lane_rx, pwr_req->lane_tx);
	char pwr_str[PWR_MODE_STR_LEN] = "";

	/* let default be PWM Gear 1, Lane 1 */
	if (!gear)
		gear = 1;

	if (!lanes)
		lanes = 1;

	if (ufs->drv_data->post_pwr_change)
		ufs->drv_data->post_pwr_change(ufs, pwr_req);

	if (ufshcd_is_hs_mode(pwr_req)) {
		switch (pwr_req->hs_rate) {
		case PA_HS_MODE_A:
		case PA_HS_MODE_B:
			phy_calibrate(generic_phy);
			break;
		}

		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s series_%s G_%d L_%d",
			"FAST", pwr_req->hs_rate == PA_HS_MODE_A ? "A" : "B",
			gear, lanes);
	} else {
		snprintf(pwr_str, PWR_MODE_STR_LEN, "%s G_%d L_%d",
			"SLOW", gear, lanes);
	}

	dev_info(hba->dev, "Power mode changed to : %s\n", pwr_str);

	return 0;
}
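
/*
 * HCI_UTRL_NEXUS_TYPE holds one bit per transfer-request slot: the bit
 * for a tag is set while the slot carries a SCSI command and cleared
 * for device-management requests (see is_scsi_cmd below).
 */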
static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
						int tag, bool is_scsi_cmd)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);

	if (is_scsi_cmd)
		hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
	else
		hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
}
static void exynos_ufs_specify_nexus_t_tm_req(struct ufs_hba *hba,
						int tag, u8 func)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	u32 type;

	type = hci_readl(ufs, HCI_UTMRL_NEXUS_TYPE);

	switch (func) {
	case UFS_ABORT_TASK:
	case UFS_QUERY_TASK:
		hci_writel(ufs, type | (1 << tag), HCI_UTMRL_NEXUS_TYPE);
		break;
	case UFS_ABORT_TASK_SET:
	case UFS_CLEAR_TASK_SET:
	case UFS_LOGICAL_RESET:
	case UFS_QUERY_TASK_SET:
		hci_writel(ufs, type & ~(1 << tag), HCI_UTMRL_NEXUS_TYPE);
		break;
	}
}
static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;
	struct phy *generic_phy = ufs->phy;
	int ret = 0;

	if (ufs->avail_ln_rx == 0 || ufs->avail_ln_tx == 0) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILRXDATALANES),
			&ufs->avail_ln_rx);
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_AVAILTXDATALANES),
			&ufs->avail_ln_tx);
		WARN(ufs->avail_ln_rx != ufs->avail_ln_tx,
			"available data lane is not equal(rx:%d, tx:%d)\n",
			ufs->avail_ln_rx, ufs->avail_ln_tx);
	}

	phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
	ret = phy_init(generic_phy);
	if (ret) {
		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
			__func__, ret);
		goto out_exit_phy;
	}

	return 0;

out_exit_phy:
	phy_exit(generic_phy);

	return ret;
}
static void exynos_ufs_config_unipro(struct exynos_ufs *ufs)
{
	struct ufs_hba *hba = ufs->hba;

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_CLK_PERIOD),
		DIV_ROUND_UP(NSEC_PER_SEC, ufs->mclk_rate));
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTRAILINGCLOCKS),
			ufs->drv_data->uic_attr->tx_trailingclks);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_DBG_OPTION_SUITE),
			ufs->drv_data->uic_attr->pa_dbg_option_suite);
}
static void exynos_ufs_config_intr(struct exynos_ufs *ufs, u32 errs, u8 index)
{
	switch (index) {
	case UNIPRO_L1_5:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_PA_LAYER);
		break;
	case UNIPRO_L2:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DL_LAYER);
		break;
	case UNIPRO_L3:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_N_LAYER);
		break;
	case UNIPRO_L4:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_T_LAYER);
		break;
	case UNIPRO_DME:
		hci_writel(ufs, DFES_ERR_EN | errs, HCI_ERR_EN_DME_LAYER);
		break;
	}
}
static int exynos_ufs_pre_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	/* hci */
	exynos_ufs_config_intr(ufs, DFES_DEF_L2_ERRS, UNIPRO_L2);
	exynos_ufs_config_intr(ufs, DFES_DEF_L3_ERRS, UNIPRO_L3);
	exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
	exynos_ufs_set_unipro_pclk_div(ufs);

	/* unipro */
	exynos_ufs_config_unipro(ufs);

	/* m-phy */
	exynos_ufs_phy_init(ufs);
	exynos_ufs_config_phy_time_attr(ufs);
	exynos_ufs_config_phy_cap_attr(ufs);

	if (ufs->drv_data->pre_link)
		ufs->drv_data->pre_link(ufs);

	return 0;
}
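
/*
 * HCI_1US_TO_CNT_VAL appears to hold the PCLK tick count making up the
 * 1 us unit of the interrupt-aggregation timer; only the low 10 bits
 * (CNT_VAL_1US_MASK) of the computed value are kept.
 */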
static void exynos_ufs_fit_aggr_timeout(struct exynos_ufs *ufs)
{
	u32 val;

	val = exynos_ufs_calc_time_cntr(ufs, IATOVAL_NSEC / CNTR_DIV_VAL);
	hci_writel(ufs, val & CNT_VAL_1US_MASK, HCI_1US_TO_CNT_VAL);
}
static int exynos_ufs_post_link(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct phy *generic_phy = ufs->phy;
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	exynos_ufs_establish_connt(ufs);
	exynos_ufs_fit_aggr_timeout(ufs);

	hci_writel(ufs, 0xa, HCI_DATA_REORDER);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_TXPRDT_ENTRY_SIZE);
	hci_writel(ufs, PRDT_SET_SIZE(12), HCI_RXPRDT_ENTRY_SIZE);
	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
	hci_writel(ufs, 0xf, HCI_AXIDMA_RWDATA_BURST_LEN);

	if (ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB)
		ufshcd_dme_set(hba,
			UIC_ARG_MIB(T_DBG_SKIP_INIT_HIBERN8_EXIT), TRUE);

	if (attr->pa_granularity) {
		exynos_ufs_enable_dbg_mode(hba);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_GRANULARITY),
				attr->pa_granularity);
		exynos_ufs_disable_dbg_mode(hba);

		if (attr->pa_tactivate)
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					attr->pa_tactivate);
		if (attr->pa_hibern8time &&
		    !(ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER))
			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					attr->pa_hibern8time);
	}

	if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
		if (!attr->pa_granularity)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
					&attr->pa_granularity);
		if (!attr->pa_hibern8time)
			ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
					&attr->pa_hibern8time);
		/*
		 * Do not wait for the HIBERN8 time to exit hibernation
		 */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 0);

		if (attr->pa_granularity < 1 || attr->pa_granularity > 6) {
			/* Valid range for granularity: 1 ~ 6 */
			dev_warn(hba->dev,
				"%s: pa_granularity %d is invalid, assuming backwards compatibility\n",
				__func__,
				attr->pa_granularity);
			attr->pa_granularity = 6;
		}
	}

	phy_calibrate(generic_phy);

	if (ufs->drv_data->post_link)
		ufs->drv_data->post_link(ufs);

	return 0;
}
static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
{
	struct device_node *np = dev->of_node;
	struct exynos_ufs_drv_data *drv_data = &exynos_ufs_drvs;
	struct exynos_ufs_uic_attr *attr;
	int ret = 0;

	while (drv_data->compatible) {
		if (of_device_is_compatible(np, drv_data->compatible)) {
			ufs->drv_data = drv_data;
			break;
		}
		drv_data++;
	}

	if (ufs->drv_data && ufs->drv_data->uic_attr) {
		attr = ufs->drv_data->uic_attr;
	} else {
		dev_err(dev, "failed to get uic attributes\n");
		ret = -EINVAL;
		goto out;
	}

	ufs->pclk_avail_min = PCLK_AVAIL_MIN;
	ufs->pclk_avail_max = PCLK_AVAIL_MAX;

	attr->rx_adv_fine_gran_sup_en = RX_ADV_FINE_GRAN_SUP_EN;
	attr->rx_adv_fine_gran_step = RX_ADV_FINE_GRAN_STEP_VAL;
	attr->rx_adv_min_actv_time_cap = RX_ADV_MIN_ACTV_TIME_CAP;
	attr->pa_granularity = PA_GRANULARITY_VAL;
	attr->pa_tactivate = PA_TACTIVATE_VAL;
	attr->pa_hibern8time = PA_HIBERN8TIME_VAL;

out:
	return ret;
}
static int exynos_ufs_init(struct ufs_hba *hba)
{
	struct device *dev = hba->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct exynos_ufs *ufs;
	int ret;

	ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
	if (!ufs)
		return -ENOMEM;

	/* exynos-specific hci */
	ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
	if (IS_ERR(ufs->reg_hci)) {
		dev_err(dev, "cannot ioremap for hci vendor register\n");
		return PTR_ERR(ufs->reg_hci);
	}

	/* unipro */
	ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
	if (IS_ERR(ufs->reg_unipro)) {
		dev_err(dev, "cannot ioremap for unipro register\n");
		return PTR_ERR(ufs->reg_unipro);
	}

	/* ufs protector */
	ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
	if (IS_ERR(ufs->reg_ufsp)) {
		dev_err(dev, "cannot ioremap for ufs protector register\n");
		return PTR_ERR(ufs->reg_ufsp);
	}

	ret = exynos_ufs_parse_dt(dev, ufs);
	if (ret) {
		dev_err(dev, "failed to get dt info\n");
		goto out;
	}

	ufs->phy = devm_phy_get(dev, "ufs-phy");
	if (IS_ERR(ufs->phy)) {
		ret = PTR_ERR(ufs->phy);
		dev_err(dev, "failed to get ufs-phy\n");
		goto out;
	}

	ret = phy_power_on(ufs->phy);
	if (ret)
		goto phy_off;

	ufs->hba = hba;
	ufs->opts = ufs->drv_data->opts;
	ufs->rx_sel_idx = PA_MAXDATALANES;
	if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX)
		ufs->rx_sel_idx = 0;
	hba->priv = (void *)ufs;
	hba->quirks = ufs->drv_data->quirks;
	if (ufs->drv_data->drv_init) {
		ret = ufs->drv_data->drv_init(dev, ufs);
		if (ret) {
			dev_err(dev, "failed to init drv-data\n");
			goto out;
		}
	}

	ret = exynos_ufs_get_clk_info(ufs);
	if (ret)
		goto out;
	exynos_ufs_specify_phy_time_attr(ufs);
	exynos_ufs_config_smu(ufs);
	return 0;

phy_off:
	phy_power_off(ufs->phy);
out:
	hba->priv = NULL;
	return ret;
}
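
/*
 * Per the note above exynos_ufs_auto_ctrl_hcc(), h/w core-clock control
 * must be off across the host S/W reset; the save/restore pair below
 * handles that, and reset completion is polled for roughly 1 ms
 * (msecs_to_jiffies(1)).
 */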
static int exynos_ufs_host_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	unsigned long timeout = jiffies + msecs_to_jiffies(1);
	u32 val;
	int ret = 0;

	exynos_ufs_disable_auto_ctrl_hcc_save(ufs, &val);

	hci_writel(ufs, UFS_SW_RST_MASK, HCI_SW_RST);

	do {
		if (!(hci_readl(ufs, HCI_SW_RST) & UFS_SW_RST_MASK))
			goto out;
	} while (time_before(jiffies, timeout));

	dev_err(hba->dev, "timeout host sw-reset\n");
	ret = -ETIMEDOUT;

out:
	exynos_ufs_auto_ctrl_hcc_restore(ufs, &val);
	return ret;
}
static void exynos_ufs_dev_hw_reset(struct ufs_hba *hba)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	/* toggle the device reset line low, then high again */
	hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
	udelay(5);
	hci_writel(ufs, 1 << 0, HCI_GPIO_OUT);
}
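
/*
 * Worked example (illustrative) for the s/w hibern8 timer below:
 * granularity_tbl maps PA_Granularity 1..6 to its step in us, so with
 * pa_granularity = 6 (100 us) and pa_hibern8time = 20, h8_time is
 * 2000 us, and the loop sleeps in slices of at most 1 ms until that
 * long has passed since ufs->entry_hibern8_t.
 */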
static void exynos_ufs_pre_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;

	if (!enter) {
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_disable_auto_ctrl_hcc(ufs);
		exynos_ufs_ungate_clks(ufs);

		if (ufs->opts & EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER) {
			const unsigned int granularity_tbl[] = {
				1, 4, 8, 16, 32, 100
			};
			int h8_time = attr->pa_hibern8time *
				granularity_tbl[attr->pa_granularity - 1];
			unsigned long us;
			s64 delta;

			do {
				delta = h8_time - ktime_us_delta(ktime_get(),
							ufs->entry_hibern8_t);
				if (delta <= 0)
					break;

				us = min_t(s64, delta, USEC_PER_MSEC);
				if (us >= 10)
					usleep_range(us, us + 10);
			} while (1);
		}
	}
}
static void exynos_ufs_post_hibern8(struct ufs_hba *hba, u8 enter)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!enter) {
		u32 cur_mode = 0;
		u32 pwrmode;

		if (ufshcd_is_hs_mode(&ufs->dev_req_params))
			pwrmode = FAST_MODE;
		else
			pwrmode = SLOW_MODE;

		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &cur_mode);
		if (cur_mode != (pwrmode << 4 | pwrmode)) {
			dev_warn(hba->dev, "%s: power mode change\n", __func__);
			hba->pwr_info.pwr_rx = (cur_mode >> 4) & 0xf;
			hba->pwr_info.pwr_tx = cur_mode & 0xf;
			ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
		}

		if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB))
			exynos_ufs_establish_connt(ufs);
	} else {
		ufs->entry_hibern8_t = ktime_get();
		exynos_ufs_gate_clks(ufs);
		if (ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL)
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
	}
}
static int exynos_ufs_hce_enable_notify(struct ufs_hba *hba,
					enum ufs_notify_change_status status)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_host_reset(hba);
		if (ret)
			return ret;
		exynos_ufs_dev_hw_reset(hba);
		break;
	case POST_CHANGE:
		exynos_ufs_calc_pwm_clk_div(ufs);
		if (!(ufs->opts & EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL))
			exynos_ufs_enable_auto_ctrl_hcc(ufs);
		break;
	}

	return ret;
}
static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
					  enum ufs_notify_change_status status)
{
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_pre_link(hba);
		break;
	case POST_CHANGE:
		ret = exynos_ufs_post_link(hba);
		break;
	}

	return ret;
}
static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	int ret = 0;

	switch (status) {
	case PRE_CHANGE:
		ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
					      dev_req_params);
		break;
	case POST_CHANGE:
		ret = exynos_ufs_post_pwr_mode(hba, NULL, dev_req_params);
		break;
	}

	return ret;
}
static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
				     enum uic_cmd_dme enter,
				     enum ufs_notify_change_status notify)
{
	switch ((u8)notify) {
	case PRE_CHANGE:
		exynos_ufs_pre_hibern8(hba, enter);
		break;
	case POST_CHANGE:
		exynos_ufs_post_hibern8(hba, enter);
		break;
	default:
		break;
	}
}
static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!ufshcd_is_link_active(hba))
		phy_power_off(ufs->phy);

	return 0;
}
static int exynos_ufs_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	struct exynos_ufs *ufs = ufshcd_get_variant(hba);

	if (!ufshcd_is_link_active(hba))
		phy_power_on(ufs->phy);

	exynos_ufs_config_smu(ufs);

	return 0;
}
static struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
	.name				= "exynos_ufs",
	.init				= exynos_ufs_init,
	.hce_enable_notify		= exynos_ufs_hce_enable_notify,
	.link_startup_notify		= exynos_ufs_link_startup_notify,
	.pwr_change_notify		= exynos_ufs_pwr_change_notify,
	.setup_xfer_req			= exynos_ufs_specify_nexus_t_xfer_req,
	.setup_task_mgmt		= exynos_ufs_specify_nexus_t_tm_req,
	.hibern8_notify			= exynos_ufs_hibern8_notify,
	.suspend			= exynos_ufs_suspend,
	.resume				= exynos_ufs_resume,
};
static int exynos_ufs_probe(struct platform_device *pdev)
{
	int err;
	struct device *dev = &pdev->dev;

	err = ufshcd_pltfrm_init(pdev, &ufs_hba_exynos_ops);
	if (err)
		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);

	return err;
}
static int exynos_ufs_remove(struct platform_device *pdev)
{
	struct ufs_hba *hba = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&(pdev)->dev);
	ufshcd_remove(hba);

	return 0;
}
struct exynos_ufs_drv_data exynos_ufs_drvs = {
	.compatible		= "samsung,exynos7-ufs",
	.uic_attr		= &exynos7_uic_attr,
	.quirks			= UFSHCD_QUIRK_PRDT_BYTE_GRAN |
				  UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
				  UFSHCI_QUIRK_BROKEN_HCE |
				  UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL,
	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
				  EXYNOS_UFS_OPT_SKIP_CONNECTION_ESTAB |
				  EXYNOS_UFS_OPT_USE_SW_HIBERN8_TIMER,
	.drv_init		= exynos7_ufs_drv_init,
	.pre_link		= exynos7_ufs_pre_link,
	.post_link		= exynos7_ufs_post_link,
	.pre_pwr_change		= exynos7_ufs_pre_pwr_change,
	.post_pwr_change	= exynos7_ufs_post_pwr_change,
};
static const struct of_device_id exynos_ufs_of_match[] = {
	{ .compatible = "samsung,exynos7-ufs",
	  .data	      = &exynos_ufs_drvs },
	{},
};
static const struct dev_pm_ops exynos_ufs_pm_ops = {
	.suspend	 = ufshcd_pltfrm_suspend,
	.resume		 = ufshcd_pltfrm_resume,
	.runtime_suspend = ufshcd_pltfrm_runtime_suspend,
	.runtime_resume	 = ufshcd_pltfrm_runtime_resume,
	.runtime_idle	 = ufshcd_pltfrm_runtime_idle,
};
static struct platform_driver exynos_ufs_pltform = {
	.probe	  = exynos_ufs_probe,
	.remove	  = exynos_ufs_remove,
	.shutdown = ufshcd_pltfrm_shutdown,
	.driver	  = {
		.name		= "exynos-ufshc",
		.pm		= &exynos_ufs_pm_ops,
		.of_match_table	= of_match_ptr(exynos_ufs_of_match),
	},
};
module_platform_driver(exynos_ufs_pltform);
MODULE_AUTHOR("Alim Akhtar <alim.akhtar@samsung.com>");
MODULE_AUTHOR("Seungwon Jeon <essuuj@gmail.com>");
MODULE_DESCRIPTION("Exynos UFS HCI Driver");
MODULE_LICENSE("GPL v2");