// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller driver Core
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include <linux/sched/clock.h>
#include <linux/iopoll.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include "ufshcd-priv.h"
#include <ufs/ufs_quirks.h>
#include <ufs/unipro.h>
#include "ufs-sysfs.h"
#include "ufs-debugfs.h"
#include "ufs-fault-injection.h"
#include "ufshcd-crypto.h"
#include <linux/unaligned.h>

#define CREATE_TRACE_POINTS
#include "ufs_trace.h"
#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)

#define UFSHCD_ENABLE_MCQ_INTRS	(UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK |\
				 MCQ_CQ_EVENT_STATUS)
/* UIC command timeout, unit: ms */
enum {
	UIC_CMD_TIMEOUT_DEFAULT	= 500,
	UIC_CMD_TIMEOUT_MAX	= 2000,
};
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 50 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    50 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Advanced RPMB request timeout */
#define ADVANCED_RPMB_REQ_TIMEOUT  3000 /* 3 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command  */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Maximum number of error handler retries before giving up */
#define MAX_ERR_HANDLER_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

/* default delay of autosuspend: 2000 ms */
#define RPM_AUTOSUSPEND_DELAY_MS 2000

/* Default delay of RPM device flush delayed work */
#define RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS 5000

/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */

/* Polling time to wait for fDeviceInit */
#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */

/* Default RTC update every 10 seconds */
#define UFS_RTC_UPDATE_INTERVAL_MS (10 * MSEC_PER_SEC)

/* bMaxNumOfRTT is equal to two after device manufacturing */
#define DEFAULT_MAX_NUM_RTT 2
/* UFSHC 4.0 compliant HC support this mode. */
static bool use_mcq_mode = true;

static bool is_mcq_supported(struct ufs_hba *hba)
{
	return hba->mcq_sup && use_mcq_mode;
}

module_param(use_mcq_mode, bool, 0644);
MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default");
static unsigned int uic_cmd_timeout = UIC_CMD_TIMEOUT_DEFAULT;

static int uic_cmd_timeout_set(const char *val, const struct kernel_param *kp)
{
	return param_set_uint_minmax(val, kp, UIC_CMD_TIMEOUT_DEFAULT,
				     UIC_CMD_TIMEOUT_MAX);
}

static const struct kernel_param_ops uic_cmd_timeout_ops = {
	.set = uic_cmd_timeout_set,
	.get = param_get_uint,
};

module_param_cb(uic_cmd_timeout, &uic_cmd_timeout_ops, &uic_cmd_timeout, 0644);
MODULE_PARM_DESC(uic_cmd_timeout,
		 "UFS UIC command timeout in milliseconds. Defaults to 500ms. Supported values range from 500ms to 2 seconds inclusively");
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({								\
		int _ret;						\
		if (_on)						\
			_ret = ufshcd_enable_vreg(_dev, _vreg);		\
		else							\
			_ret = ufshcd_disable_vreg(_dev, _vreg);	\
		_ret;							\
	})

#define ufshcd_hex_dump(prefix_str, buf, len) do {			\
	size_t __len = (len);						\
	print_hex_dump(KERN_ERR, prefix_str,				\
		       __len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,\
		       16, 4, buf, __len, false);			\
} while (0)
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix)
{
	u32 *regs;
	size_t pos;

	if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */
		return -EINVAL;

	regs = kzalloc(len, GFP_ATOMIC);
	if (!regs)
		return -ENOMEM;

	for (pos = 0; pos < len; pos += 4) {
		if (offset == 0 &&
		    pos >= REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER &&
		    pos <= REG_UIC_ERROR_CODE_DME)
			continue;

		regs[pos / 4] = ufshcd_readl(hba, offset + pos);
	}

	ufshcd_hex_dump(prefix, regs, len);
	kfree(regs);

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
};
static const char *const ufshcd_state_name[] = {
	[UFSHCD_STATE_RESET]			= "reset",
	[UFSHCD_STATE_OPERATIONAL]		= "operational",
	[UFSHCD_STATE_ERROR]			= "error",
	[UFSHCD_STATE_EH_SCHEDULED_FATAL]	= "eh_fatal",
	[UFSHCD_STATE_EH_SCHEDULED_NON_FATAL]	= "eh_non_fatal",
};
/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};
/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
	UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};
#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
const struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	[UFS_PM_LVL_0] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_1] = {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_2] = {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	[UFS_PM_LVL_3] = {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_4] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	[UFS_PM_LVL_5] = {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
	/*
	 * For DeepSleep, the link is first put in hibern8 and then off.
	 * Leaving the link in hibern8 is not supported.
	 */
	[UFS_PM_LVL_6] = {UFS_DEEPSLEEP_PWR_MODE, UIC_LINK_OFF_STATE},
};
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
		    (ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
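/*
 * Illustrative sketch, not part of the upstream driver: how a power
 * management level decomposes into a device power mode and a link state.
 * Only helpers and constants already defined in this file are used; the
 * chosen level (UFS_PM_LVL_3) simply restates one row of the
 * ufs_pm_lvl_states[] table above.
 */
static void __maybe_unused ufs_pm_lvl_example(void)
{
	enum ufs_dev_pwr_mode dev_state = ufs_get_pm_lvl_to_dev_pwr_mode(UFS_PM_LVL_3);
	enum uic_link_state link_state = ufs_get_pm_lvl_to_link_pwr_state(UFS_PM_LVL_3);

	/* UFS_PM_LVL_3 == (UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE) */
	WARN_ON(dev_state != UFS_SLEEP_PWR_MODE);
	WARN_ON(link_state != UIC_LINK_HIBERN8_STATE);

	/* The reverse lookup maps the same pair back to the same level. */
	WARN_ON(ufs_get_desired_pm_lvl_for_dev_link_state(dev_state, link_state) !=
		UFS_PM_LVL_3);
}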
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
	return (hba->clk_gating.active_reqs || hba->outstanding_reqs || hba->outstanding_tasks ||
		hba->active_uic_cmd || hba->uic_async_done);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
	/* UFS cards deviations table */
	{ .wmanufacturerid = UFS_VENDOR_MICRON,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
	  .model = "hB8aL1" /*H28U62301AMR*/,
	  .quirk = UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = UFS_ANY_MODEL,
	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9C8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
	  .model = "THGLF2G9D8KBADG",
	  .quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
	{}
};
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
			     bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
					 struct ufs_vreg *vreg);
static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
void ufshcd_enable_irq(struct ufs_hba *hba)
{
	if (!hba->is_irq_enabled) {
		enable_irq(hba->irq);
		hba->is_irq_enabled = true;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_enable_irq);

void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		disable_irq(hba->irq);
		hba->is_irq_enabled = false;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
static void ufshcd_configure_wb(struct ufs_hba *hba)
{
	if (!ufshcd_is_wb_allowed(hba))
		return;

	ufshcd_wb_toggle(hba, true);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, true);

	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, true);
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				      enum ufs_trace_str_t str_t)
{
	struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
	struct utp_upiu_header *header;

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		header = &rq->header;
	else
		header = &hba->lrb[tag].ucd_rsp_ptr->header;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, header, &rq->sc.cdb,
			  UFS_TSF_CDB);
}
static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
					enum ufs_trace_str_t str_t,
					struct utp_upiu_req *rq_rsp)
{
	if (!trace_ufshcd_upiu_enabled())
		return;

	trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
			  &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];

	if (!trace_ufshcd_upiu_enabled())
		return;

	if (str_t == UFS_TM_SEND)
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_req.req_header,
				  &descp->upiu_req.input_param1,
				  UFS_TSF_TM_INPUT);
	else
		trace_ufshcd_upiu(dev_name(hba->dev), str_t,
				  &descp->upiu_rsp.rsp_header,
				  &descp->upiu_rsp.output_param1,
				  UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
					 const struct uic_command *ucmd,
					 enum ufs_trace_str_t str_t)
{
	u32 cmd;

	if (!trace_ufshcd_uic_command_enabled())
		return;

	if (str_t == UFS_CMD_SEND)
		cmd = ucmd->command;
	else
		cmd = ufshcd_readl(hba, REG_UIC_COMMAND);

	trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
				 ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
				     enum ufs_trace_str_t str_t)
{
	u64 lba = 0;
	u8 opcode = 0, group_id = 0;
	u32 intr, doorbell = 0;
	int hwq_id = -1;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct request *rq = scsi_cmd_to_rq(cmd);
	int transfer_len = -1;

	if (!cmd)
		return;

	/* trace UPIU also */
	ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
	if (!trace_ufshcd_command_enabled())
		return;

	opcode = cmd->cmnd[0];

	if (opcode == READ_10 || opcode == WRITE_10) {
		/*
		 * Currently we only fully trace read(10) and write(10) commands
		 */
		transfer_len =
			be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		lba = scsi_get_lba(cmd);
		if (opcode == WRITE_10)
			group_id = lrbp->cmd->cmnd[6];
	} else if (opcode == UNMAP) {
		/*
		 * The number of Bytes to be unmapped beginning with the lba.
		 */
		transfer_len = blk_rq_bytes(rq);
		lba = scsi_get_lba(cmd);
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (hba->mcq_enabled) {
		struct ufs_hw_queue *hwq = ufshcd_mcq_req_to_hwq(hba, rq);

		hwq_id = hwq->id;
	} else {
		doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	}
	trace_ufshcd_command(cmd->device, str_t, tag, doorbell, hwq_id,
			     transfer_len, intr, lba, opcode, group_id);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
		    clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
				clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,
			     const char *err_name)
{
	int i;
	bool found = false;
	const struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];

	for (i = 0; i < UFS_EVENT_HIST_LENGTH; i++) {
		int p = (i + e->pos) % UFS_EVENT_HIST_LENGTH;

		if (e->tstamp[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p,
			e->val[p], div_u64(e->tstamp[p], 1000));
		found = true;
	}

	if (!found)
		dev_err(hba->dev, "No record of %s\n", err_name);
	else
		dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}
static void ufshcd_print_evt_hist(struct ufs_hba *hba)
{
	ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");

	ufshcd_print_evt(hba, UFS_EVT_PA_ERR, "pa_err");
	ufshcd_print_evt(hba, UFS_EVT_DL_ERR, "dl_err");
	ufshcd_print_evt(hba, UFS_EVT_NL_ERR, "nl_err");
	ufshcd_print_evt(hba, UFS_EVT_TL_ERR, "tl_err");
	ufshcd_print_evt(hba, UFS_EVT_DME_ERR, "dme_err");
	ufshcd_print_evt(hba, UFS_EVT_AUTO_HIBERN8_ERR,
			 "auto_hibern8_err");
	ufshcd_print_evt(hba, UFS_EVT_FATAL_ERR, "fatal_err");
	ufshcd_print_evt(hba, UFS_EVT_LINK_STARTUP_FAIL,
			 "link_startup_fail");
	ufshcd_print_evt(hba, UFS_EVT_RESUME_ERR, "resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_SUSPEND_ERR,
			 "suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_RES_ERR, "wlun resume_fail");
	ufshcd_print_evt(hba, UFS_EVT_WL_SUSP_ERR,
			 "wlun suspend_fail");
	ufshcd_print_evt(hba, UFS_EVT_DEV_RESET, "dev_reset");
	ufshcd_print_evt(hba, UFS_EVT_HOST_RESET, "host_reset");
	ufshcd_print_evt(hba, UFS_EVT_ABORT, "task_abort");

	ufshcd_vops_dbg_register_dump(hba);
}
void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
{
	const struct ufshcd_lrb *lrbp;
	int prdt_length;

	lrbp = &hba->lrb[tag];

	dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
		tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
	dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
		tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
	dev_err(hba->dev,
		"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
		tag, (u64)lrbp->utrd_dma_addr);

	ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
			sizeof(struct utp_transfer_req_desc));
	dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_req_dma_addr);
	ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
			sizeof(struct utp_upiu_req));
	dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
		(u64)lrbp->ucd_rsp_dma_addr);
	ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
			sizeof(struct utp_upiu_rsp));

	prdt_length = le16_to_cpu(
		lrbp->utr_descriptor_ptr->prd_table_length);
	if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
		prdt_length /= ufshcd_sg_entry_size(hba);

	dev_err(hba->dev,
		"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
		tag, prdt_length,
		(u64)lrbp->ucd_prdt_dma_addr);

	if (pr_prdt)
		ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				ufshcd_sg_entry_size(hba) * prdt_length);
}
static bool ufshcd_print_tr_iter(struct request *req, void *priv)
{
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	ufshcd_print_tr(hba, req->tag, *(bool *)priv);

	return true;
}
/**
 * ufshcd_print_trs_all - print trs for all started requests.
 * @hba: per-adapter instance.
 * @pr_prdt: need to print prdt or not.
 */
static void ufshcd_print_trs_all(struct ufs_hba *hba, bool pr_prdt)
{
	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_print_tr_iter, &pr_prdt);
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];

		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;

	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
		div_u64(hba->ufs_stats.last_hibern8_exit_tstamp, 1000),
		hba->ufs_stats.hibern8_exit_cnt);
	dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
		div_u64(hba->ufs_stats.last_intr_ts, 1000),
		hba->ufs_stats.last_intr_status);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
		hba->ufs_version, hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
	if (sdev_ufs)
		dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
			sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);

	ufshcd_print_clk_freqs(hba);
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	/*
	 * Using dev_dbg to avoid messages during runtime PM to avoid
	 * never-ending cycles of messages written back to storage by user space
	 * causing runtime resume, causing more messages and so on.
	 */
	dev_dbg(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		__func__,
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
static void ufshcd_device_reset(struct ufs_hba *hba)
{
	int err;

	err = ufshcd_vops_device_reset(hba);

	if (!err) {
		ufshcd_set_ufs_dev_active(hba);
		if (ufshcd_is_wb_allowed(hba)) {
			hba->dev_info.wb_enabled = false;
			hba->dev_info.wb_buf_flush_enabled = false;
		}
		if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
			hba->dev_info.rtc_time_baseline = 0;
	}
	if (err != -EOPNOTSUPP)
		ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
}
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
	if (!us)
		return;

	if (us < 10)
		udelay(us);
	else
		usleep_range(us, us + tolerance);
}
EXPORT_SYMBOL_GPL(ufshcd_delay_us);
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: mmio register offset
 * @mask: mask to apply to the read register value
 * @val: value to wait for
 * @interval_us: polling interval in microseconds
 * @timeout_ms: timeout in milliseconds
 *
 * Return: -ETIMEDOUT on error, zero on success.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long interval_us,
				    unsigned long timeout_ms)
{
	u32 v;

	val &= mask; /* ignore bits that we don't intend to wait on */

	return read_poll_timeout(ufshcd_readl, v, (v & mask) == val,
				 interval_us, timeout_ms * 1000, false, hba, reg);
}
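/*
 * Illustrative sketch, not part of the upstream driver: a typical caller of
 * the polling helper above waits for a register bit to read back as set.
 * Here we wait for the controller enable bit, polling every 100 us for up
 * to 10 ms; the interval and timeout values are arbitrary example numbers,
 * not values the driver actually uses.
 */
static int __maybe_unused ufshcd_example_wait_controller_enable(struct ufs_hba *hba)
{
	return ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
					CONTROLLER_ENABLE, CONTROLLER_ENABLE,
					100, 10);
}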
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba: Pointer to adapter instance
 *
 * Return: interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version <= ufshci_version(2, 0))
		return INTERRUPT_MASK_ALL_VER_11;

	return INTERRUPT_MASK_ALL_VER_21;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba: Pointer to adapter instance
 *
 * Return: UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	u32 ufshci_ver;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		ufshci_ver = ufshcd_vops_get_ufs_hci_version(hba);
	else
		ufshci_ver = ufshcd_readl(hba, REG_UFS_VERSION);

	/*
	 * UFSHCI v1.x uses a different version scheme, in order
	 * to allow the use of comparisons with the ufshci_version
	 * function, we convert it to the same scheme as ufs 2.0+.
	 */
	if (ufshci_ver & 0x00010000)
		return ufshci_version(1, ufshci_ver & 0x00000100);

	return ufshci_ver;
}
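/*
 * Illustrative sketch, not part of the upstream driver: on a UFSHCI 1.x host
 * the VER register sets bit 16 for the major version and bit 8 for the minor
 * revision (0x00010100 is used here purely as an example readout). The branch
 * above converts that into the encoding ufshci_version() produces for 2.0+
 * controllers so that version comparisons elsewhere in this file work for
 * every host.
 */
static void __maybe_unused ufshcd_example_version_conversion(void)
{
	u32 raw_v1x = 0x00010100;	/* example 1.x VER register value */
	u32 converted = ufshci_version(1, raw_v1x & 0x00000100);

	/* A converted 1.x version sorts at or below 2.0, as callers expect. */
	WARN_ON(!(converted <= ufshci_version(2, 0)));
}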
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Return: true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_STATUS) & DEVICE_PRESENT;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 * @cqe: pointer to the completion queue entry
 *
 * This function is used to get the OCS field from UTRD
 *
 * Return: the OCS field in the UTRD.
 */
static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp,
				      struct cq_entry *cqe)
{
	if (cqe)
		return le32_to_cpu(cqe->status) & MASK_OCS;

	return lrbp->utr_descriptor_ptr->header.ocs & MASK_OCS;
}
/**
 * ufshcd_utrl_clear() - Clear requests from the controller request list.
 * @hba: per adapter instance
 * @mask: mask with one bit set for each request to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		mask = ~mask;
	/*
	 * From the UFSHCI specification: "UTP Transfer Request List CLear
	 * Register (UTRLCLR): This field is bit significant. Each bit
	 * corresponds to a slot in the UTP Transfer Request List, where bit 0
	 * corresponds to request slot 0. A bit in this field is set to '0'
	 * by host software to indicate to the host controller that a transfer
	 * request slot is cleared. The host controller
	 * shall free up any resources associated to the request slot
	 * immediately, and shall set the associated bit in UTRLDBR to '0'. The
	 * host software indicates no change to request slots by setting the
	 * associated bits in this field to '1'. Bits in this field shall only
	 * be set '1' or '0' by host software when UTRLRSR is set to '1'."
	 */
	ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}
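/*
 * Illustrative sketch, not part of the upstream driver: clearing request
 * slot 3 means writing a word with every bit set except bit 3, because
 * UTRLCLR treats '0' as "clear this slot" and '1' as "leave it alone".
 * The slot number is an arbitrary example.
 */
static inline void __maybe_unused ufshcd_example_clear_slot3(struct ufs_hba *hba)
{
	u32 mask = BIT(3);	/* request slot 3 */

	/* ufshcd_utrl_clear() writes ~mask, i.e. 0xFFFFFFF7, to UTRLCLR. */
	ufshcd_utrl_clear(hba, mask);
}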
/**
 * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utmrl_clear(struct ufs_hba *hba, u32 pos)
{
	if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR)
		ufshcd_writel(hba, (1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
	else
		ufshcd_writel(hba, ~(1 << pos), REG_UTP_TASK_REQ_LIST_CLEAR);
}
/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Return: 0 on success; a positive value if failed.
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 *
 * Return: 0 on success; non-zero value on error.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 *
 * Return: the value of UIC command argument 3 (the DME attribute value).
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return: UPIU type.
 */
static inline enum upiu_response_transaction
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.transaction_code;
}
/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Return: true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return ucd_rsp_ptr->header.device_information & 1;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
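/*
 * Illustrative sketch, not part of the upstream driver: with the defaults at
 * the top of this file, a caller could aggregate up to one transfer-queue's
 * worth of completions or wait INT_AGGR_DEF_TO * 40 us, whichever comes
 * first. The counter threshold below (one less than the transfer request
 * queue depth) is only an example choice.
 */
static inline void __maybe_unused ufshcd_example_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
}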
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 *			When run-stop registers are set to 1, it indicates to
 *			the host controller that it can process the requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	u32 val = CONTROLLER_ENABLE;

	if (ufshcd_crypto_enable(hba))
		val |= CRYPTO_GENERAL_ENABLE;

	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}
/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Return: true if and only if the controller is active.
 */
bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE;
}
EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
/**
 * ufshcd_pm_qos_init - initialize PM QoS request
 * @hba: per adapter instance
 */
void ufshcd_pm_qos_init(struct ufs_hba *hba)
{
	if (hba->pm_qos_enabled)
		return;

	cpu_latency_qos_add_request(&hba->pm_qos_req, PM_QOS_DEFAULT_VALUE);

	if (cpu_latency_qos_request_active(&hba->pm_qos_req))
		hba->pm_qos_enabled = true;
}
/**
 * ufshcd_pm_qos_exit - remove request from PM QoS
 * @hba: per adapter instance
 */
void ufshcd_pm_qos_exit(struct ufs_hba *hba)
{
	if (!hba->pm_qos_enabled)
		return;

	cpu_latency_qos_remove_request(&hba->pm_qos_req);
	hba->pm_qos_enabled = false;
}
/**
 * ufshcd_pm_qos_update - update PM QoS request
 * @hba: per adapter instance
 * @on: If True, vote for perf PM QoS mode otherwise power save mode
 */
static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
	if (!hba->pm_qos_enabled)
		return;

	cpu_latency_qos_update_request(&hba->pm_qos_req, on ? 0 : PM_QOS_DEFAULT_VALUE);
}
/**
 * ufshcd_set_clk_freq - set UFS controller clock frequencies
 * @hba: per adapter instance
 * @scale_up: If True, set max possible frequency otherwise set low frequency
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
			clki->name, clk_get_rate(clki->clk));
	}

out:
	return ret;
}
int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
			   struct dev_pm_opp *opp, void *data,
			   bool scaling_down)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct list_head *head = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long freq;
	u8 idx = 0;
	int ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			freq = dev_pm_opp_get_freq_indexed(opp, idx++);

			/* Do not set rate for clocks having frequency as 0 */
			if (!freq)
				continue;

			ret = clk_set_rate(clki->clk, freq);
			if (ret) {
				dev_err(dev, "%s: %s clk set rate(%ldHz) failed, %d\n",
					__func__, clki->name, freq, ret);
				return ret;
			}

			trace_ufshcd_clk_scaling(dev_name(dev),
				(scaling_down ? "scaled down" : "scaled up"),
				clki->name, hba->clk_scaling.target_freq, freq);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ufshcd_opp_config_clks);
static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
{
	struct dev_pm_opp *opp;
	int ret;

	opp = dev_pm_opp_find_freq_floor_indexed(hba->dev,
						 &freq, 0);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(hba->dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}
/**
 * ufshcd_scale_clks - scale up or scale down UFS controller clocks
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: 0 if successful; < 0 upon failure.
 */
static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
			     bool scale_up)
{
	int ret = 0;
	ktime_t start = ktime_get();

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		goto out;

	if (hba->use_pm_opp)
		ret = ufshcd_opp_set_rate(hba, freq);
	else
		ret = ufshcd_set_clk_freq(hba, scale_up);
	if (ret)
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret) {
		if (hba->use_pm_opp)
			ufshcd_opp_set_rate(hba,
					    hba->devfreq->previous_freq);
		else
			ufshcd_set_clk_freq(hba, !scale_up);
		goto out;
	}

	ufshcd_pm_qos_update(hba, scale_up);

out:
	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True if scaling up and false if scaling down
 *
 * Return: true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       unsigned long freq, bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	if (hba->use_pm_opp)
		return freq != hba->clk_scaling.target_freq;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
/*
 * Determine the number of pending commands by counting the bits in the SCSI
 * device budget maps. This approach has been selected because a bit is set in
 * the budget map before scsi_host_queue_ready() checks the host_self_blocked
 * flag. The host_self_blocked flag can be modified by calling
 * scsi_block_requests() or scsi_unblock_requests().
 */
static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
{
	const struct scsi_device *sdev;
	unsigned long flags;
	u32 pending = 0;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__shost_for_each_device(sdev, hba->host)
		pending += sbitmap_weight(&sdev->budget_map);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return pending;
}
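/*
 * Illustrative sketch, not part of the upstream driver: the per-device
 * contribution to the count above is simply the number of bits set in that
 * device's budget map - one bit per command that currently holds queue
 * budget. Summing this over every device on the host yields the value
 * ufshcd_pending_cmds() returns.
 */
static u32 __maybe_unused ufshcd_example_device_pending(struct scsi_device *sdev)
{
	return sbitmap_weight(&sdev->budget_map);
}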
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_pending;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_pending = ufshcd_pending_cmds(hba);
		if (!tm_doorbell && !tr_pending) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		io_schedule_timeout(msecs_to_jiffies(20));
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_pending);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_pending);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time;
 * non-zero for any other errors.
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > hba->clk_scaling.min_gear ||
		    hba->pwr_info.gear_rx > hba->clk_scaling.min_gear) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info,
			       &hba->pwr_info,
			       sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = hba->clk_scaling.min_gear;
			new_pwr_info.gear_rx = hba->clk_scaling.min_gear;
		}
	}

	/* check if the power mode needs to be changed or not? */
	ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}
/*
 * Wait until all pending SCSI commands and TMFs have finished or the timeout
 * has expired.
 *
 * Return: 0 upon success; -EBUSY upon timeout.
 */
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba, u64 timeout_us)
{
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	blk_mq_quiesce_tagset(&hba->host->tag_set);
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);

	if (!hba->clk_scaling.is_allowed ||
	    ufshcd_wait_for_doorbell_clr(hba, timeout_us)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		mutex_unlock(&hba->wb_mutex);
		blk_mq_unquiesce_tagset(&hba->host->tag_set);
		goto out;
	}

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba);

out:
	return ret;
}
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up)
{
	up_write(&hba->clk_scaling_lock);

	/* Enable Write Booster if we have scaled up else disable it */
	if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
		ufshcd_wb_toggle(hba, scale_up);

	mutex_unlock(&hba->wb_mutex);

	blk_mq_unquiesce_tagset(&hba->host->tag_set);
	ufshcd_release(hba);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @freq: frequency to scale
 * @scale_up: True for scaling up and false for scaling down
 *
 * Return: 0 for success; -EBUSY if scaling can't happen at this time; non-zero
 * for any other errors.
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, unsigned long freq,
				bool scale_up)
{
	int ret = 0;

	ret = ufshcd_clock_scaling_prepare(hba, 1 * USEC_PER_SEC);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out_unprepare;
	}

	ret = ufshcd_scale_clks(hba, freq, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out_unprepare;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, hba->devfreq->previous_freq,
					  false);
			goto out_unprepare;
		}
	}

out_unprepare:
	ufshcd_clock_scaling_unprepare(hba, ret, scale_up);
	return ret;
}
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_suspend_device(hba->devfreq);
}
static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				 unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up = false, sched_clk_scaling_suspend_work = false;
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	if (hba->use_pm_opp) {
		struct dev_pm_opp *opp;

		/* Get the recommended frequency from OPP framework */
		opp = devfreq_recommended_opp(dev, freq, flags);
		if (IS_ERR(opp))
			return PTR_ERR(opp);

		dev_pm_opp_put(opp);
	} else {
		/* Override with the closest supported frequency */
		clki = list_first_entry(&hba->clk_list_head, struct ufs_clk_info,
					list);
		*freq = (unsigned long) clk_round_rate(clki->clk, *freq);
	}

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	/* Skip scaling clock when clock scaling is suspended */
	if (hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		dev_warn(hba->dev, "clock scaling is suspended, skip");
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	if (list_empty(clk_list)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		goto out;
	}

	/* Decide based on the target or rounded-off frequency and update */
	if (hba->use_pm_opp)
		scale_up = *freq > hba->clk_scaling.target_freq;
	else
		scale_up = *freq == clki->max_freq;

	if (!hba->use_pm_opp && !scale_up)
		*freq = clki->min_freq;

	/* Update the frequency */
	if (!ufshcd_is_devfreq_scaling_required(hba, *freq, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, *freq, scale_up);
	if (!ret)
		hba->clk_scaling.target_freq = *freq;

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work &&
	    (!scale_up || hba->clk_scaling.suspend_on_no_request))
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
					 struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;
	ktime_t curr_t;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	curr_t = ktime_get();
	if (!scaling->window_start_t)
		goto start_window;

	/*
	 * If current frequency is 0, then the ondemand governor considers
	 * there's no initial frequency set. And it always requests to set
	 * to max. frequency.
	 */
	if (hba->use_pm_opp) {
		stat->current_frequency = hba->clk_scaling.target_freq;
	} else {
		struct list_head *clk_list = &hba->clk_list_head;
		struct ufs_clk_info *clki;

		clki = list_first_entry(clk_list, struct ufs_clk_info, list);
		stat->current_frequency = clki->curr_freq;
	}

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_us_delta(curr_t,
						      scaling->busy_start_t);
	stat->total_time = ktime_us_delta(curr_t, scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = curr_t;
	scaling->tot_busy_t = 0;

	if (scaling->active_reqs) {
		scaling->busy_start_t = curr_t;
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}
static int ufshcd_devfreq_init(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;
	struct ufs_clk_info *clki;
	struct devfreq *devfreq;
	int ret;

	/* Skip devfreq if we don't have any clocks in the list */
	if (list_empty(clk_list))
		return 0;

	if (!hba->use_pm_opp) {
		clki = list_first_entry(clk_list, struct ufs_clk_info, list);
		dev_pm_opp_add(hba->dev, clki->min_freq, 0);
		dev_pm_opp_add(hba->dev, clki->max_freq, 0);
	}

	ufshcd_vops_config_scaling_param(hba, &hba->vps->devfreq_profile,
					 &hba->vps->ondemand_data);
	devfreq = devfreq_add_device(hba->dev,
			&hba->vps->devfreq_profile,
			DEVFREQ_GOV_SIMPLE_ONDEMAND,
			&hba->vps->ondemand_data);
	if (IS_ERR(devfreq)) {
		ret = PTR_ERR(devfreq);
		dev_err(hba->dev, "Unable to register with devfreq %d\n", ret);

		if (!hba->use_pm_opp) {
			dev_pm_opp_remove(hba->dev, clki->min_freq);
			dev_pm_opp_remove(hba->dev, clki->max_freq);
		}
		return ret;
	}

	hba->devfreq = devfreq;

	return 0;
}
static void ufshcd_devfreq_remove(struct ufs_hba *hba)
{
	struct list_head *clk_list = &hba->clk_list_head;

	if (!hba->devfreq)
		return;

	devfreq_remove_device(hba->devfreq);
	hba->devfreq = NULL;

	if (!hba->use_pm_opp) {
		struct ufs_clk_info *clki;

		clki = list_first_entry(clk_list, struct ufs_clk_info, list);
		dev_pm_opp_remove(hba->dev, clki->min_freq);
		dev_pm_opp_remove(hba->dev, clki->max_freq);
	}
}
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
		hba->clk_scaling.window_start_t = 0;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		devfreq_suspend_device(hba->devfreq);
}
static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err = 0;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	down(&hba->host_sem);
	if (!ufshcd_is_user_access_allowed(hba)) {
		err = -EBUSY;
		goto out;
	}

	value = !!value;
	if (value == hba->clk_scaling.is_enabled)
		goto out;

	ufshcd_rpm_get_sync(hba);
	ufshcd_hold(hba);

	hba->clk_scaling.is_enabled = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, ULONG_MAX, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
				__func__, err);
	}

	ufshcd_release(hba);
	ufshcd_rpm_put_sync(hba);
out:
	up(&hba->host_sem);
	return err ? err : count;
}
static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_scaling.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
}
static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.min_gear)
		hba->clk_scaling.min_gear = UFS_HS_G1;

	INIT_WORK(&hba->clk_scaling.suspend_work,
		  ufshcd_clk_scaling_suspend_work);
	INIT_WORK(&hba->clk_scaling.resume_work,
		  ufshcd_clk_scaling_resume_work);

	hba->clk_scaling.workq = alloc_ordered_workqueue(
		"ufs_clkscaling_%d", WQ_MEM_RECLAIM, hba->host->host_no);

	hba->clk_scaling.is_initialized = true;
}
static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
{
	if (!hba->clk_scaling.is_initialized)
		return;

	ufshcd_remove_clk_scaling_sysfs(hba);
	destroy_workqueue(hba->clk_scaling.workq);
	ufshcd_devfreq_remove(hba);
	hba->clk_scaling.is_initialized = false;
}
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_hba_vreg_set_hpm(hba);
	ufshcd_setup_clocks(hba, true);

	ufshcd_enable_irq(hba);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 */
void ufshcd_hold(struct ufs_hba *hba)
{
	bool flush_result;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba) ||
	    !hba->clk_gating.is_initialized)
		return;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_result = flush_work(&hba->clk_gating.ungate_work);
			if (hba->clk_gating.is_suspended && !flush_result)
				return;
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
		fallthrough;
	case CLKS_OFF:
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		queue_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
		fallthrough;
	case REQ_CLKS_ON:
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_gating.gate_work.work);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
	    (hba->clk_gating.state != REQ_CLKS_OFF)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (ufshcd_is_ufs_dev_busy(hba) || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (ret) {
			hba->clk_gating.state = CLKS_ON;
			dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
				__func__, ret);
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	ufshcd_disable_irq(hba);

	ufshcd_setup_clocks(hba, false);

	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
	    hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
	    hba->active_uic_cmd || hba->uic_async_done ||
	    hba->clk_gating.state == CLKS_OFF)
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	queue_delayed_work(hba->clk_gating.clk_gating_workq,
			   &hba->clk_gating.gate_work,
			   msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
}
void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_clkgate_delay_set);
static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	ufshcd_clkgate_delay_set(dev, value);
	return count;
}
static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
}
2063 static ssize_t
ufshcd_clkgate_enable_store(struct device
*dev
,
2064 struct device_attribute
*attr
, const char *buf
, size_t count
)
2066 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
2067 unsigned long flags
;
2070 if (kstrtou32(buf
, 0, &value
))
2075 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
2076 if (value
== hba
->clk_gating
.is_enabled
)
2080 __ufshcd_release(hba
);
2082 hba
->clk_gating
.active_reqs
++;
2084 hba
->clk_gating
.is_enabled
= value
;
2086 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
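/*
 * Illustration (not part of the driver): the show/store callbacks above back
 * the "clkgate_delay_ms" and "clkgate_enable" attributes registered below, so
 * user space can tune clock gating at runtime. A minimal user-space sketch,
 * assuming a platform-dependent sysfs path:
 *
 *	int fd = open("/sys/.../clkgate_delay_ms", O_WRONLY);
 *	write(fd, "300", 3);	// gate clocks 300 ms after going idle
 *	close(fd);
 */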
static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}

static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
	if (hba->clk_gating.delay_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	if (hba->clk_gating.enable_attr.attr.name)
		device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.state = CLKS_ON;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
		"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
		hba->host->host_no);

	ufshcd_init_clk_gating_sysfs(hba);

	hba->clk_gating.is_enabled = true;
	hba->clk_gating.is_initialized = true;
}

static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!hba->clk_gating.is_initialized)
		return;

	ufshcd_remove_clk_gating_sysfs(hba);

	/* Ungate the clock if necessary. */
	ufshcd_hold(hba);
	hba->clk_gating.is_initialized = false;
	ufshcd_release(hba);

	destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;
	ktime_t curr_t = ktime_get();
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return;
	}

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = curr_t;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = curr_t;
		hba->clk_scaling.is_busy_started = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.active_reqs--;
	if (!scaling->active_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
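/*
 * Note (illustrative): the two helpers above bracket request activity for the
 * clock-scaling statistics. When the last outstanding request completes,
 * tot_busy_t grows by the time elapsed since busy_start_t; e.g. if
 * busy_start_t was taken 10 ms earlier, tot_busy_t increases by roughly
 * 10000 us.
 */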
static inline int ufshcd_monitor_opcode2dir(u8 opcode)
{
	if (opcode == READ_6 || opcode == READ_10 || opcode == READ_16)
		return READ;
	else if (opcode == WRITE_6 || opcode == WRITE_10 || opcode == WRITE_16)
		return WRITE;

	return -EINVAL;
}

static inline bool ufshcd_should_inform_monitor(struct ufs_hba *hba,
						struct ufshcd_lrb *lrbp)
{
	const struct ufs_hba_monitor *m = &hba->monitor;

	return (m->enabled && lrbp && lrbp->cmd &&
		(!m->chunk_size || m->chunk_size == lrbp->cmd->sdb.length) &&
		ktime_before(hba->monitor.enabled_ts, lrbp->issue_time_stamp));
}

static void ufshcd_start_monitor(struct ufs_hba *hba,
				 const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir]++ == 0)
		hba->monitor.busy_start_ts[dir] = ktime_get();
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb *lrbp)
{
	int dir = ufshcd_monitor_opcode2dir(*lrbp->cmd->cmnd);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (dir >= 0 && hba->monitor.nr_queued[dir] > 0) {
		const struct request *req = scsi_cmd_to_rq(lrbp->cmd);
		struct ufs_hba_monitor *m = &hba->monitor;
		ktime_t now, inc, lat;

		now = lrbp->compl_time_stamp;
		inc = ktime_sub(now, m->busy_start_ts[dir]);
		m->total_busy[dir] = ktime_add(m->total_busy[dir], inc);
		m->nr_sec_rw[dir] += blk_rq_sectors(req);

		/* Update latencies */
		m->nr_req[dir]++;
		lat = ktime_sub(now, lrbp->issue_time_stamp);
		m->lat_sum[dir] += lat;
		if (m->lat_max[dir] < lat || !m->lat_max[dir])
			m->lat_max[dir] = lat;
		if (m->lat_min[dir] > lat || !m->lat_min[dir])
			m->lat_min[dir] = lat;

		m->nr_queued[dir]--;
		/* Push forward the busy start of monitor */
		m->busy_start_ts[dir] = now;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
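/*
 * Note (illustrative): ufshcd_update_monitor() above keeps per-direction I/O
 * statistics. For each completed request the latency is
 * lat = compl_time_stamp - issue_time_stamp; lat_sum accumulates it while
 * lat_min/lat_max track the extremes, so e.g. three reads taking 1 ms, 2 ms
 * and 6 ms leave lat_min = 1 ms, lat_max = 6 ms and lat_sum = 9 ms.
 */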
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 * @hwq: pointer to hardware queue instance
 */
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
			 struct ufs_hw_queue *hwq)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
	unsigned long flags;

	lrbp->issue_time_stamp = ktime_get();
	lrbp->issue_time_stamp_local_clock = local_clock();
	lrbp->compl_time_stamp = ktime_set(0, 0);
	lrbp->compl_time_stamp_local_clock = 0;
	ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);

	ufshcd_clk_scaling_start_busy(hba);
	if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
		ufshcd_start_monitor(hba, lrbp);

	if (hba->mcq_enabled) {
		int utrd_size = sizeof(struct utp_transfer_req_desc);
		struct utp_transfer_req_desc *src = lrbp->utr_descriptor_ptr;
		struct utp_transfer_req_desc *dest;

		spin_lock(&hwq->sq_lock);
		dest = hwq->sqe_base_addr + hwq->sq_tail_slot;
		memcpy(dest, src, utrd_size);
		ufshcd_inc_sq_tail(hwq);
		spin_unlock(&hwq->sq_lock);
	} else {
		spin_lock_irqsave(&hba->outstanding_lock, flags);
		if (hba->vops && hba->vops->setup_xfer_req)
			hba->vops->setup_xfer_req(hba, lrbp->task_tag,
						  !!lrbp->cmd);
		__set_bit(lrbp->task_tag, &hba->outstanding_reqs);
		ufshcd_writel(hba, 1 << lrbp->task_tag,
			      REG_UTP_TRANSFER_REQ_DOOR_BELL);
		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
	}
}
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrbp: pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	u8 *const sense_buffer = lrbp->cmd->sense_buffer;
	u16 resp_len;
	int len;

	resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header.data_segment_length);
	if (sense_buffer && resp_len) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, UFS_SENSE_SIZE, len);

		memcpy(sense_buffer, lrbp->ucd_rsp_ptr->sr.sense_data,
		       len_to_copy);
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
				       .data_segment_length);
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, buf_len);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 *
 * Return: 0 on success, negative on error.
 */
static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	int err;

	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS_SDB) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
	hba->reserved_slot = hba->nutrs - 1;

	hba->nortt = FIELD_GET(MASK_NUMBER_OUTSTANDING_RTT, hba->capabilities) + 1;

	/* Read crypto capabilities */
	err = ufshcd_hba_init_crypto_capabilities(hba);
	if (err) {
		dev_err(hba->dev, "crypto setup failed\n");
		return err;
	}

	/*
	 * The UFSHCI 3.0 specification does not define MCQ_SUPPORT and
	 * LSDB_SUPPORT, but [31:29] as reserved bits with reset value 0s, which
	 * means we can simply read values regardless of version.
	 */
	hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities);
	/*
	 * 0h: legacy single doorbell support is available
	 * 1h: indicate that legacy single doorbell support has been removed
	 */
	if (!(hba->quirks & UFSHCD_QUIRK_BROKEN_LSDBS_CAP))
		hba->lsdb_sup = !FIELD_GET(MASK_LSDB_SUPPORT, hba->capabilities);
	else
		hba->lsdb_sup = true;

	if (!hba->mcq_sup)
		return 0;

	hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP);
	hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT,
				     hba->mcq_capabilities);

	return 0;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 *
 * Return: true on success, else false.
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	u32 val;
	int ret = read_poll_timeout(ufshcd_readl, val, val & UIC_COMMAND_READY,
				    500, uic_cmd_timeout * 1000, false, hba,
				    REG_CONTROLLER_STATUS);
	return ret == 0;
}
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 *
 * Return: value of UPMCRS field.
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
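/*
 * Note (illustrative): the shift/mask above extracts bits 10:8 of the host
 * controller status register, e.g. a register value of 0x00000700 yields a
 * UPMCRS of 7.
 */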
/**
 * ufshcd_dispatch_uic_cmd - Dispatch an UIC command to the Unipro layer
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of an UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(uic_cmd_timeout))) {
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	} else {
		ret = -ETIMEDOUT;
		dev_err(hba->dev,
			"uic cmd 0x%x with arg3 0x%x completion timeout\n",
			uic_cmd->command, uic_cmd->argument3);

		if (!uic_cmd->cmd_active) {
			dev_err(hba->dev, "%s: UIC cmd has been completed, return the result\n",
				__func__);
			ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
		}
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	lockdep_assert_held(&hba->uic_cmd_mutex);

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	init_completion(&uic_cmd->done);

	uic_cmd->cmd_active = 1;
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);

	return 0;
}

/**
 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Return: 0 only if success.
 */
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UIC_CMD)
		return 0;

	ufshcd_hold(hba);
	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
	if (!ret)
		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	mutex_unlock(&hba->uic_cmd_mutex);

	ufshcd_release(hba);

	return ret;
}
/**
 * ufshcd_sgl_to_prdt - SG list to PRTD (Physical Region Description Table, 4DW format)
 * @hba: per-adapter instance
 * @lrbp: pointer to local reference block
 * @sg_entries: The number of sg lists actually used
 * @sg_list: Pointer to SG list
 */
static void ufshcd_sgl_to_prdt(struct ufs_hba *hba, struct ufshcd_lrb *lrbp, int sg_entries,
			       struct scatterlist *sg_list)
{
	struct ufshcd_sg_entry *prd;
	struct scatterlist *sg;
	int i;

	if (sg_entries) {

		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
			lrbp->utr_descriptor_ptr->prd_table_length =
				cpu_to_le16(sg_entries * ufshcd_sg_entry_size(hba));
		else
			lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_entries);

		prd = lrbp->ucd_prdt_ptr;

		for_each_sg(sg_list, sg, sg_entries, i) {
			const unsigned int len = sg_dma_len(sg);

			/*
			 * From the UFSHCI spec: "Data Byte Count (DBC): A '0'
			 * based value that indicates the length, in bytes, of
			 * the data block. A maximum of length of 256KB may
			 * exist for any entry. Bits 1:0 of this field shall be
			 * 11b to indicate Dword granularity. A value of '3'
			 * indicates 4 bytes, '7' indicates 8 bytes, etc."
			 */
			WARN_ONCE(len > SZ_256K, "len = %#x\n", len);
			prd->size = cpu_to_le32(len - 1);
			prd->addr = cpu_to_le64(sg->dma_address);
			prd->reserved = 0;
			prd = (void *)prd + ufshcd_sg_entry_size(hba);
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}
}
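/*
 * Worked example for the DBC encoding used above (illustrative): a 4096-byte
 * SG segment is programmed as prd->size = 4096 - 1 = 4095 (0xFFF). Its two
 * least significant bits are 11b, which matches the Dword-granularity rule
 * quoted from the UFSHCI spec.
 */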
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 in case of success, non-zero value in case of failure.
 */
static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	int sg_segments = scsi_dma_map(cmd);

	if (sg_segments < 0)
		return sg_segments;

	ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));

	return ufshcd_crypto_fill_prdt(hba, lrbp);
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	set |= intrs;
	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

	set &= ~intrs;
	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
/**
 * ufshcd_prepare_req_desc_hdr - Fill the UTP Transfer request descriptor
 * header according to the request
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: requests data direction
 * @ehs_length: Total EHS Length (in 32-byte units of all Extra Header Segments)
 */
static void
ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			    u8 *upiu_flags, enum dma_data_direction cmd_dir,
			    int ehs_length)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	struct request_desc_header *h = &req_desc->header;
	enum utp_data_direction data_direction;

	lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;

	*h = (typeof(*h)){ };

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	h->command_type = lrbp->command_type;
	h->data_direction = data_direction;
	h->ehs_length = ehs_length;

	if (lrbp->intr_cmd)
		h->interrupt = 1;

	/* Prepare crypto related dwords */
	ufshcd_prepare_req_desc_hdr_crypto(lrbp, h);

	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	h->ocs = OCS_INVALID_COMMAND_STATUS;

	req_desc->prd_table_length = 0;
}
/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct scsi_cmnd *cmd = lrbp->cmd;
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	unsigned short cdb_len;

	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_COMMAND,
		.flags = upiu_flags,
		.lun = lrbp->lun,
		.task_tag = lrbp->task_tag,
		.command_set_type = UPIU_COMMAND_SET_TYPE_SCSI,
	};

	WARN_ON_ONCE(ucd_req_ptr->header.task_tag != lrbp->task_tag);

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length);

	cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fill the utp_transfer_req_desc for query request
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u8 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_QUERY_REQ,
		.flags = upiu_flags,
		.lun = lrbp->lun,
		.task_tag = lrbp->task_tag,
		.query_function = query->request.query_func,
		/* Data segment length only need for WRITE_DESC */
		.data_segment_length =
			query->request.upiu_req.opcode ==
					UPIU_QUERY_OPCODE_WRITE_DESC ?
				cpu_to_be16(len) :
				0,
	};

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(ucd_req_ptr + 1, query->descriptor, len);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	ucd_req_ptr->header = (struct utp_upiu_header){
		.transaction_code = UPIU_TRANSACTION_NOP_OUT,
		.task_tag = lrbp->task_tag,
	};

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
 * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
 *			     for Device Management Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
				      struct ufshcd_lrb *lrbp)
{
	u8 upiu_flags;
	int ret = 0;

	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);

	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
		ufshcd_prepare_utp_nop_upiu(lrbp);
	else
		ret = -EINVAL;

	return ret;
}

/**
 * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
 *			   for SCSI Purposes
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct request *rq = scsi_cmd_to_rq(lrbp->cmd);
	unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
	u8 upiu_flags;

	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
	if (ioprio_class == IOPRIO_CLASS_RT)
		upiu_flags |= UPIU_CMD_FLAGS_CP;
	ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
}

static void __ufshcd_setup_cmd(struct ufshcd_lrb *lrbp, struct scsi_cmnd *cmd, u8 lun, int tag)
{
	memset(lrbp->ucd_req_ptr, 0, sizeof(*lrbp->ucd_req_ptr));

	lrbp->cmd = cmd;
	lrbp->task_tag = tag;
	lrbp->lun = lun;
	ufshcd_prepare_lrbp_crypto(cmd ? scsi_cmd_to_rq(cmd) : NULL, lrbp);
}

static void ufshcd_setup_scsi_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
				  struct scsi_cmnd *cmd, u8 lun, int tag)
{
	__ufshcd_setup_cmd(lrbp, cmd, lun, tag);
	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
	lrbp->req_abort_skip = false;

	ufshcd_comp_scsi_upiu(hba, lrbp);
}

/**
 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
 * @upiu_wlun_id: UPIU W-LUN id
 *
 * Return: SCSI W-LUN id.
 */
static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
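/*
 * Note (illustrative): the mapping above clears the UPIU W-LUN flag bit and
 * ORs in the SCSI well-known-LUN base. For example, assuming a W-LUN id of
 * 0xD0 and a SCSI_W_LUN_BASE of 0xc100 (both values given here only for
 * illustration), the result would be (0xD0 & ~0x80) | 0xc100 = 0xc150.
 */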
static inline bool is_device_wlun(struct scsi_device *sdev)
{
	return sdev->lun ==
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}

/*
 * Associate the UFS controller queue with the default and poll HCTX types.
 * Initialize the mq_map[] arrays.
 */
static void ufshcd_map_queues(struct Scsi_Host *shost)
{
	struct ufs_hba *hba = shost_priv(shost);
	int i, queue_offset = 0;

	if (!is_mcq_supported(hba)) {
		hba->nr_queues[HCTX_TYPE_DEFAULT] = 1;
		hba->nr_queues[HCTX_TYPE_READ] = 0;
		hba->nr_queues[HCTX_TYPE_POLL] = 1;
		hba->nr_hw_queues = 1;
	}

	for (i = 0; i < shost->nr_maps; i++) {
		struct blk_mq_queue_map *map = &shost->tag_set.map[i];

		map->nr_queues = hba->nr_queues[i];
		if (!map->nr_queues)
			continue;
		map->queue_offset = queue_offset;
		if (i == HCTX_TYPE_POLL && !is_mcq_supported(hba))
			map->queue_offset = 0;

		blk_mq_map_queues(map);
		queue_offset += map->nr_queues;
	}
}

static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
	struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
		i * ufshcd_get_ucd_size(hba);
	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
		i * ufshcd_get_ucd_size(hba);
	u16 response_offset = le16_to_cpu(utrdlp[i].response_upiu_offset);
	u16 prdt_offset = le16_to_cpu(utrdlp[i].prd_table_offset);

	lrb->utr_descriptor_ptr = utrdlp + i;
	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
		i * sizeof(struct utp_transfer_req_desc);
	lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu;
	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
}
/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp;
	int err = 0;
	struct ufs_hw_queue *hwq = NULL;

	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
		/*
		 * SCSI error handler can call ->queuecommand() while UFS error
		 * handler is in progress. Error interrupts could change the
		 * state from UFSHCD_STATE_RESET to
		 * UFSHCD_STATE_EH_SCHEDULED_NON_FATAL. Prevent requests
		 * being issued in that case.
		 */
		if (ufshcd_eh_in_progress(hba)) {
			err = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		break;
	case UFSHCD_STATE_EH_SCHEDULED_FATAL:
		/*
		 * pm_runtime_get_sync() is used at error handling preparation
		 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from hba's
		 * PM ops, it can never be finished if we let SCSI layer keep
		 * retrying it, which gets err handler stuck forever. Neither
		 * can we let the scsi cmd pass through, because UFS is in bad
		 * state, the scsi cmd may eventually time out, which will get
		 * err handler blocked for too long. So, just fail the scsi cmd
		 * sent from PM ops, err handler can recover PM error anyways.
		 */
		if (hba->pm_op_in_progress) {
			hba->force_reset = true;
			set_host_byte(cmd, DID_BAD_TARGET);
			scsi_done(cmd);
			goto out;
		}
		fallthrough;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		scsi_done(cmd);
		goto out;
	}

	hba->req_abort_count = 0;

	ufshcd_hold(hba);

	lrbp = &hba->lrb[tag];

	ufshcd_setup_scsi_cmd(hba, lrbp, cmd, ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);

	err = ufshcd_map_sg(hba, lrbp);
	if (err) {
		ufshcd_release(hba);
		goto out;
	}

	if (hba->mcq_enabled)
		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));

	ufshcd_send_command(hba, tag, hwq);

out:
	if (ufs_trigger_eh(hba)) {
		unsigned long flags;

		spin_lock_irqsave(hba->host->host_lock, flags);
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	return err;
}
static void ufshcd_setup_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			     enum dev_cmd_type cmd_type, u8 lun, int tag)
{
	__ufshcd_setup_cmd(lrbp, NULL, lun, tag);
	lrbp->intr_cmd = true; /* No interrupt aggregation */
	hba->dev_cmd.type = cmd_type;
}

static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
{
	ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);

	return ufshcd_compose_devman_upiu(hba, lrbp);
}

/*
 * Check with the block layer if the command is inflight
 * @cmd: command to check.
 *
 * Return: true if command is inflight; false if not.
 */
bool ufshcd_cmd_inflight(struct scsi_cmnd *cmd)
{
	return cmd && blk_mq_rq_state(scsi_cmd_to_rq(cmd)) == MQ_RQ_IN_FLIGHT;
}

/*
 * Clear the pending command in the controller and wait until
 * the controller confirms that the command has been cleared.
 * @hba: per adapter instance
 * @task_tag: The tag number of the command to be cleared.
 */
static int ufshcd_clear_cmd(struct ufs_hba *hba, u32 task_tag)
{
	u32 mask;
	int err;

	if (hba->mcq_enabled) {
		/*
		 * MCQ mode. Clean up the MCQ resources similar to
		 * what the ufshcd_utrl_clear() does for SDB mode.
		 */
		err = ufshcd_mcq_sq_cleanup(hba, task_tag);
		if (err) {
			dev_err(hba->dev, "%s: failed tag=%d. err=%d\n",
				__func__, task_tag, err);
			return err;
		}
		return 0;
	}

	mask = 1U << task_tag;

	/* clear outstanding transaction before retry */
	ufshcd_utrl_clear(hba, mask);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
					mask, ~mask, 1000, 1000);
}
/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	enum upiu_response_transaction resp;
	int err = 0;

	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP: {
		u8 response = lrbp->ucd_rsp_ptr->header.response;

		if (response == 0)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	}
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	case UPIU_TRANSACTION_RESPONSE:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp);
		}
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}
static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	unsigned long time_left = msecs_to_jiffies(max_timeout);
	unsigned long flags;
	bool pending;
	int err;

retry:
	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
						time_left);

	if (likely(time_left)) {
		/*
		 * The completion handler called complete() and the caller of
		 * this function still owns the @lrbp tag so the code below does
		 * not trigger any race conditions.
		 */
		hba->dev_cmd.complete = NULL;
		err = ufshcd_get_tr_ocs(lrbp, NULL);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	} else {
		err = -ETIMEDOUT;
		dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
			__func__, lrbp->task_tag);

		/* MCQ mode */
		if (hba->mcq_enabled) {
			/* successfully cleared the command, retry if needed */
			if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
				err = -EAGAIN;
			hba->dev_cmd.complete = NULL;
			return err;
		}

		/* SDB mode */
		if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0) {
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
			/*
			 * Since clearing the command succeeded we also need to
			 * clear the task tag bit from the outstanding_reqs
			 * variable.
			 */
			spin_lock_irqsave(&hba->outstanding_lock, flags);
			pending = test_bit(lrbp->task_tag,
					   &hba->outstanding_reqs);
			if (pending) {
				hba->dev_cmd.complete = NULL;
				__clear_bit(lrbp->task_tag,
					    &hba->outstanding_reqs);
			}
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			if (!pending) {
				/*
				 * The completion handler ran while we tried to
				 * clear the command.
				 */
				time_left = 1;
				goto retry;
			}
		} else {
			dev_err(hba->dev, "%s: failed to clear tag %d\n",
				__func__, lrbp->task_tag);

			spin_lock_irqsave(&hba->outstanding_lock, flags);
			pending = test_bit(lrbp->task_tag,
					   &hba->outstanding_reqs);
			if (pending)
				hba->dev_cmd.complete = NULL;
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			if (!pending) {
				/*
				 * The completion handler ran while we tried to
				 * clear the command.
				 */
				time_left = 1;
				goto retry;
			}
		}
	}

	return err;
}
static void ufshcd_dev_man_lock(struct ufs_hba *hba)
{
	ufshcd_hold(hba);
	mutex_lock(&hba->dev_cmd.lock);
	down_read(&hba->clk_scaling_lock);
}

static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
{
	up_read(&hba->clk_scaling_lock);
	mutex_unlock(&hba->dev_cmd.lock);
	ufshcd_release(hba);
}

static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			  const u32 tag, int timeout)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	int err;

	hba->dev_cmd.complete = &wait;

	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);

	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

	return err;
}
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba: UFS hba
 * @cmd_type: specifies the type (NOP, Query...)
 * @timeout: timeout in milliseconds
 *
 * Return: 0 upon success; < 0 upon failure.
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int err;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (err)
		return err;

	return ufshcd_issue_dev_cmd(hba, lrbp, tag, timeout);
}
/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

static int ufshcd_query_flag_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
	int ret;
	int retries;

	for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
		ret = ufshcd_query_flag(hba, opcode, idn, index, flag_res);
		if (ret)
			dev_dbg(hba->dev,
				"%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
			__func__, opcode, idn, ret, retries);
	return ret;
}
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @index: flag index to access
 * @flag_res: the flag value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, u8 index, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_dev_man_lock(hba);

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	ufshcd_dev_man_unlock(hba);
	return err;
}
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	ufshcd_dev_man_lock(hba);

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	ufshcd_dev_man_unlock(hba);
	return err;
}
/**
 * ufshcd_query_attr_retry() - API function for sending query
 * attribute with retries
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request
 * completes
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_attr_retry(struct ufs_hba *hba,
	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
	u32 *attr_val)
{
	int ret = 0;
	u32 retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		ret = ufshcd_query_attr(hba, opcode, idn, index,
						selector, attr_val);
		if (ret)
			dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
				__func__, ret, retries);
		else
			break;
	}

	if (ret)
		dev_err(hba->dev,
			"%s: query attribute, idn %d, failed with error %d after %d retries\n",
			__func__, idn, ret, QUERY_REQ_RETRIES);
	return ret;
}
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		return -EINVAL;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		return -EINVAL;
	}

	ufshcd_dev_man_lock(hba);

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
				__func__, opcode, idn, index, err);
		goto out_unlock;
	}

	*buf_len = be16_to_cpu(response->upiu_res.length);

out_unlock:
	hba->dev_cmd.query.descriptor = NULL;
	ufshcd_dev_man_unlock(hba);
	return err;
}
/**
 * ufshcd_query_descriptor_retry - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 *
 * Return: 0 for success, non-zero in case of failure.
 */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len)
{
	int err;
	int retries;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		err = __ufshcd_query_descriptor(hba, opcode, idn, index,
						selector, desc_buf, buf_len);
		if (!err || err == -EINVAL)
			break;
	}

	return err;
}
/**
 * ufshcd_read_desc_param - read the specified descriptor parameter
 * @hba: Pointer to adapter instance
 * @desc_id: descriptor idn value
 * @desc_index: descriptor index
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size)
{
	int ret;
	u8 *desc_buf;
	int buff_len = QUERY_DESC_MAX_SIZE;
	bool is_kmalloc = true;

	/* Safety check */
	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;

	/* Check whether we need temp memory */
	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kzalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
	} else {
		desc_buf = param_read_buf;
		is_kmalloc = false;
	}

	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0,
					    desc_buf, &buff_len);
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n",
			__func__, desc_id, desc_index, param_offset, ret);
		goto out;
	}

	/* Update descriptor length */
	buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET];

	if (param_offset >= buff_len) {
		dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n",
			__func__, param_offset, desc_id, buff_len);
		ret = -EINVAL;
		goto out;
	}

	/* Sanity check */
	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n",
			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
		ret = -EINVAL;
		goto out;
	}

	if (is_kmalloc) {
		/* Make sure we don't copy more data than available */
		if (param_offset >= buff_len)
			ret = -EINVAL;
		else
			memcpy(param_read_buf, &desc_buf[param_offset],
			       min_t(u32, param_size, buff_len - param_offset));
	}
out:
	if (is_kmalloc)
		kfree(desc_buf);
	return ret;
}
/**
 * struct uc_string_id - unicode string
 *
 * @len: size of this descriptor inclusive
 * @type: descriptor type
 * @uc: unicode string character
 */
struct uc_string_id {
	u8 len;
	u8 type;
	wchar_t uc[];
} __packed;

/* replace non-printable or non-ASCII characters with spaces */
static inline char ufshcd_remove_non_printable(u8 ch)
{
	return (ch >= 0x20 && ch <= 0x7e) ? ch : ' ';
}
/**
 * ufshcd_read_string_desc - read string descriptor
 * @hba: pointer to adapter instance
 * @desc_index: descriptor index
 * @buf: pointer to buffer where descriptor would be read,
 *       the caller should free the memory.
 * @ascii: if true convert from unicode to ascii characters
 *         null terminated string.
 *
 * Return:
 * *      string size on success.
 * *      -ENOMEM: on allocation failure
 * *      -EINVAL: on a wrong parameter
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii)
{
	struct uc_string_id *uc_str;
	u8 *str;
	int ret;

	if (!buf)
		return -EINVAL;

	uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!uc_str)
		return -ENOMEM;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_STRING, desc_index, 0,
				     (u8 *)uc_str, QUERY_DESC_MAX_SIZE);
	if (ret < 0) {
		dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n",
			QUERY_REQ_RETRIES, ret);
		str = NULL;
		goto out;
	}

	if (uc_str->len <= QUERY_DESC_HDR_SIZE) {
		dev_dbg(hba->dev, "String Desc is of zero length\n");
		str = NULL;
		ret = 0;
		goto out;
	}

	if (ascii) {
		ssize_t ascii_len;
		int i;
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		str = kzalloc(ascii_len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		ret = utf16s_to_utf8s(uc_str->uc,
				      uc_str->len - QUERY_DESC_HDR_SIZE,
				      UTF16_BIG_ENDIAN, str, ascii_len - 1);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ret; i++)
			str[i] = ufshcd_remove_non_printable(str[i]);

		str[ret++] = '\0';

	} else {
		str = kmemdup(uc_str, uc_str->len, GFP_KERNEL);
		if (!str) {
			ret = -ENOMEM;
			goto out;
		}
		ret = uc_str->len;
	}
out:
	*buf = str;
	kfree(uc_str);
	return ret;
}
/**
 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
 * @hba: Pointer to adapter instance
 * @lun: lun id
 * @param_offset: offset of the parameter to read
 * @param_read_buf: pointer to buffer where parameter would be read
 * @param_size: sizeof(param_read_buf)
 *
 * Return: 0 in case of success, non-zero otherwise.
 */
static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
					      int lun,
					      enum unit_desc_param param_offset,
					      u8 *param_read_buf,
					      u32 param_size)
{
	/*
	 * Unit descriptors are only available for general purpose LUs (LUN id
	 * from 0 to 7) and RPMB Well known LU.
	 */
	if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
		return -EOPNOTSUPP;

	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
				      param_offset, param_read_buf, param_size);
}
static int ufshcd_get_ref_clk_gating_wait(struct ufs_hba *hba)
{
	int err = 0;
	u32 gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;

	if (hba->dev_info.wspecversion >= 0x300) {
		err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_REF_CLK_GATING_WAIT_TIME, 0, 0,
				&gating_wait);
		if (err)
			dev_err(hba->dev, "Failed reading bRefClkGatingWait. err = %d, use default %uus\n",
					err, gating_wait);

		if (gating_wait == 0) {
			gating_wait = UFSHCD_REF_CLK_GATING_WAIT_US;
			dev_err(hba->dev, "Undefined ref clk gating wait time, use default %uus\n",
					gating_wait);
		}

		hba->dev_info.clk_gating_wait_us = gating_wait;
	}

	return err;
}
/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consists of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = ufshcd_get_ucd_size(hba) * hba->nutrs;
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (128 - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1KB alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (SZ_1K - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Skip utmrdl allocation; it may have been
	 * allocated during first pass and not released during
	 * MCQ memory allocation.
	 * See ufshcd_release_sdb_queue() and ufshcd_config_mcq()
	 */
	if (hba->utmrdl_base_addr)
		goto skip_utmrdl;
	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1KB alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (SZ_1K - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

skip_utmrdl:
	/* Allocate memory for local reference block */
	hba->lrb = devm_kcalloc(hba->dev,
				hba->nutrs, sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}
/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = ufshcd_get_ucd_size(hba);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr =
				cpu_to_le64(cmd_desc_element_addr);

		/* Response upiu and prdt offset should be in double words */
		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE);
		} else {
			utrdlp[i].response_upiu_offset =
				cpu_to_le16(response_offset >> 2);
			utrdlp[i].prd_table_offset =
				cpu_to_le16(prdt_offset >> 2);
			utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
		}

		ufshcd_init_lrb(hba, &hba->lrb[i], i);
	}
}
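/*
 * Note (illustrative): unless UFSHCD_QUIRK_PRDT_BYTE_GRAN is set, the offsets
 * and lengths above are stored in Dword units, so e.g. a response UPIU offset
 * of 1024 bytes is programmed as 1024 >> 2 = 256.
 */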
/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_LINK_STARTUP,
	};
	int ret;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_dbg(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_reset - UIC command for DME_RESET
 * @hba: per adapter instance
 *
 * DME_RESET command is issued in order to reset UniPro stack.
 * This function now deals with cold reset.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_reset(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_RESET,
	};
	int ret;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-reset: error code %d\n", ret);

	return ret;
}
int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
			       int agreed_gear,
			       int adapt_val)
{
	int ret;

	if (agreed_gear < UFS_HS_G4)
		adapt_val = PA_NO_ADAPT;

	ret = ufshcd_dme_set(hba,
			     UIC_ARG_MIB(PA_TXHSADAPTTYPE),
			     adapt_val);
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_configure_adapt);

/**
 * ufshcd_dme_enable - UIC command for DME_ENABLE
 * @hba: per adapter instance
 *
 * DME_ENABLE command is issued in order to enable UniPro stack.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_dme_enable(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_ENABLE,
	};
	int ret;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-enable: error code %d\n", ret);

	return ret;
}
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
{
	#define MIN_DELAY_BEFORE_DME_CMDS_US	1000
	unsigned long min_sleep_time_us;

	if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
		return;

	/*
	 * last_dme_cmd_tstamp will be 0 only for 1st call to
	 * this function
	 */
	if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
		min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
	} else {
		unsigned long delta =
			(unsigned long) ktime_to_us(
				ktime_sub(ktime_get(),
				hba->last_dme_cmd_tstamp));

		if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
			min_sleep_time_us =
				MIN_DELAY_BEFORE_DME_CMDS_US - delta;
		else
			min_sleep_time_us = 0; /* no more delay required */
	}

	if (min_sleep_time_us > 0) {
		/* allow sleep for extra 50us if needed */
		usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
	}

	/* update the last_dme_cmd_tstamp */
	hba->last_dme_cmd_tstamp = ktime_get();
}
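/*
 * Worked example (illustrative): with MIN_DELAY_BEFORE_DME_CMDS_US = 1000, if
 * the previous DME command finished 400 us ago the code above sleeps for the
 * remaining ~600 us (usleep_range() allows up to 50 us of slack) before the
 * next DME command is issued.
 */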
4108 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
4109 * @hba: per adapter instance
4110 * @attr_sel: uic command argument1
4111 * @attr_set: attribute set type as uic command argument2
4112 * @mib_val: setting value as uic command argument3
4113 * @peer: indicate whether peer or local
4115 * Return: 0 on success, non-zero value on failure.
4117 int ufshcd_dme_set_attr(struct ufs_hba
*hba
, u32 attr_sel
,
4118 u8 attr_set
, u32 mib_val
, u8 peer
)
4120 struct uic_command uic_cmd
= {
4121 .command
= peer
? UIC_CMD_DME_PEER_SET
: UIC_CMD_DME_SET
,
4122 .argument1
= attr_sel
,
4123 .argument2
= UIC_ARG_ATTR_TYPE(attr_set
),
4124 .argument3
= mib_val
,
4126 static const char *const action
[] = {
4130 const char *set
= action
[!!peer
];
4132 int retries
= UFS_UIC_COMMAND_RETRIES
;
4135 /* for peer attributes we retry upon failure */
4136 ret
= ufshcd_send_uic_cmd(hba
, &uic_cmd
);
4138 dev_dbg(hba
->dev
, "%s: attr-id 0x%x val 0x%x error code %d\n",
4139 set
, UIC_GET_ATTR_ID(attr_sel
), mib_val
, ret
);
4140 } while (ret
&& peer
&& --retries
);
4143 dev_err(hba
->dev
, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
4144 set
, UIC_GET_ATTR_ID(attr_sel
), mib_val
,
4145 UFS_UIC_COMMAND_RETRIES
- retries
);
4149 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr
);

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {
		.command = peer ? UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET,
		.argument1 = attr_sel,
	};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;
	int retries = UFS_UIC_COMMAND_RETRIES;
	struct ufs_pa_layer_attr orig_pwr_info;
	struct ufs_pa_layer_attr temp_pwr_info;
	bool pwr_mode_change = false;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
		orig_pwr_info = hba->pwr_info;
		temp_pwr_info = orig_pwr_info;

		if (orig_pwr_info.pwr_tx == FAST_MODE ||
		    orig_pwr_info.pwr_rx == FAST_MODE) {
			temp_pwr_info.pwr_tx = FASTAUTO_MODE;
			temp_pwr_info.pwr_rx = FASTAUTO_MODE;
			pwr_mode_change = true;
		} else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
			   orig_pwr_info.pwr_rx == SLOW_MODE) {
			temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
			temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
			pwr_mode_change = true;
		}
		if (pwr_mode_change) {
			ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
			if (ret)
				goto out;
		}
	}

	do {
		/* for peer attributes we retry upon failure */
		ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
		if (ret)
			dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
				get, UIC_GET_ATTR_ID(attr_sel), ret);
	} while (ret && peer && --retries);

	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
			get, UIC_GET_ATTR_ID(attr_sel),
			UFS_UIC_COMMAND_RETRIES - retries);

	if (mib_val && !ret)
		*mib_val = uic_cmd.argument3;

	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
	    && pwr_mode_change)
		ufshcd_change_power_mode(hba, &orig_pwr_info);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
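
/*
 * Illustrative sketch (not part of the driver): reading back a local and a
 * peer attribute with the ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers,
 * which expand to ufshcd_dme_get_attr() above. The helper name is an
 * example only.
 */
static int __maybe_unused ufshcd_example_read_lanes(struct ufs_hba *hba,
						    u32 *rx, u32 *tx)
{
	int ret;

	/* Connected RX lanes as seen by the host. */
	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), rx);
	if (ret)
		return ret;

	/* Connected TX lanes as reported by the peer (device). */
	return ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), tx);
}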

/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 *
 * @hba: per adapter instance
 * @cmd: UIC command to execute
 *
 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
 * DME_HIBERNATE_EXIT commands take some time to take effect on both host
 * and device UniPro link, hence their final completion is indicated by
 * dedicated status bits in the Interrupt Status register (UPMS, UHES, UHXS)
 * in addition to the normal UIC command completion status (UCCS). This
 * function only returns after the relevant status bits indicate completion.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	DECLARE_COMPLETION_ONSTACK(uic_async_done);
	unsigned long flags;
	u8 status;
	int ret;
	bool reenable_intr = false;

	mutex_lock(&hba->uic_cmd_mutex);
	ufshcd_add_delay_before_dme_cmd(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_is_link_broken(hba)) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	hba->uic_async_done = &uic_async_done;
	if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
		ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
		/*
		 * Make sure UIC command completion interrupt is disabled before
		 * issuing UIC command.
		 */
		ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
		reenable_intr = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->uic_async_done,
					 msecs_to_jiffies(uic_cmd_timeout))) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
			cmd->command, cmd->argument3);

		if (!cmd->cmd_active) {
			dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
				__func__);
			goto check_upmcrs;
		}

		ret = -ETIMEDOUT;
		goto out;
	}

check_upmcrs:
	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
			cmd->command, status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	if (ret) {
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	hba->uic_async_done = NULL;
	if (reenable_intr)
		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
	if (ret) {
		ufshcd_set_link_broken(hba);
		ufshcd_schedule_eh_work(hba);
	}
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);

	return ret;
}

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 * using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_SET,
		.argument1 = UIC_ARG_MIB(PA_PWRMODE),
		.argument3 = mode,
	};
	int ret;

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
		ret = ufshcd_dme_set(hba,
				     UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
		if (ret) {
			dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
				__func__, ret);
			goto out;
		}
	}

	ufshcd_hold(hba);
	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	ufshcd_release(hba);

out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_change_pwr_mode);

int ufshcd_link_recovery(struct ufs_hba *hba)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Reset the attached device */
	ufshcd_device_reset(hba);

	ret = ufshcd_host_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (ret)
		dev_err(hba->dev, "%s: link recovery failed, err %d",
			__func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);

int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_HIBER_ENTER,
	};
	ktime_t start = ktime_get();
	int ret;

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);

	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret)
		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
			__func__, ret);
	else
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
					   POST_CHANGE);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);

int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {
		.command = UIC_CMD_DME_HIBER_EXIT,
	};
	int ret;
	ktime_t start = ktime_get();

	ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);

	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);

	if (ret) {
		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
			__func__, ret);
	} else {
		ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
					   POST_CHANGE);
		hba->ufs_stats.last_hibern8_exit_tstamp = local_clock();
		hba->ufs_stats.hibern8_exit_cnt++;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit);

static void ufshcd_configure_auto_hibern8(struct ufs_hba *hba)
{
	if (!ufshcd_is_auto_hibern8_supported(hba))
		return;

	ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
}

void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit)
{
	const u32 cur_ahit = READ_ONCE(hba->ahit);

	if (!ufshcd_is_auto_hibern8_supported(hba) || cur_ahit == ahit)
		return;

	WRITE_ONCE(hba->ahit, ahit);
	if (!pm_runtime_suspended(&hba->ufs_device_wlun->sdev_gendev)) {
		ufshcd_rpm_get_sync(hba);
		ufshcd_hold(hba);
		ufshcd_configure_auto_hibern8(hba);
		ufshcd_release(hba);
		ufshcd_rpm_put_sync(hba);
	}
}
EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update);
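
/*
 * Illustrative sketch (not part of the driver): a hypothetical helper that
 * requests a 150 us auto-hibernate idle timer. It assumes the
 * ufshcd_us_to_ahit() encoding helper from ufshcd.h; the helper name and the
 * chosen timer value are examples only.
 */
static void __maybe_unused ufshcd_example_set_ahit(struct ufs_hba *hba)
{
	/* Encode 150 us into the AHIT timer/scale fields and apply it. */
	ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(150));
}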

/**
 * ufshcd_init_pwr_info - setting the POR (power on reset)
 * values in hba power info
 * @hba: per-adapter instance
 */
static void ufshcd_init_pwr_info(struct ufs_hba *hba)
{
	hba->pwr_info.gear_rx = UFS_PWM_G1;
	hba->pwr_info.gear_tx = UFS_PWM_G1;
	hba->pwr_info.lane_rx = UFS_LANE_1;
	hba->pwr_info.lane_tx = UFS_LANE_1;
	hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
	hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
	hba->pwr_info.hs_rate = 0;
}

/**
 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
 * @hba: per-adapter instance
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;

	if (hba->max_pwr_info.is_valid)
		return 0;

	if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) {
		pwr_info->pwr_tx = FASTAUTO_MODE;
		pwr_info->pwr_rx = FASTAUTO_MODE;
	} else {
		pwr_info->pwr_tx = FAST_MODE;
		pwr_info->pwr_rx = FAST_MODE;
	}
	pwr_info->hs_rate = PA_HS_MODE_B;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
		       &pwr_info->lane_rx);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
		       &pwr_info->lane_tx);

	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	if (pwr_info->lane_rx != pwr_info->lane_tx) {
		dev_err(hba->dev, "%s: asymmetric connected lanes. rx=%d, tx=%d\n",
			__func__, pwr_info->lane_rx, pwr_info->lane_tx);
		return -EINVAL;
	}

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
	if (!pwr_info->gear_rx) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
			       &pwr_info->gear_rx);
		if (!pwr_info->gear_rx) {
			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
				__func__, pwr_info->gear_rx);
			return -EINVAL;
		}
		pwr_info->pwr_rx = SLOW_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
			    &pwr_info->gear_tx);
	if (!pwr_info->gear_tx) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &pwr_info->gear_tx);
		if (!pwr_info->gear_tx) {
			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
				__func__, pwr_info->gear_tx);
			return -EINVAL;
		}
		pwr_info->pwr_tx = SLOW_MODE;
	}

	hba->max_pwr_info.is_valid = true;
	return 0;
}

static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (!hba->force_pmc &&
	    pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), false);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), false);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
			       DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
			       DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
			       DL_AFC0ReqTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
			       DL_FC1ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
			       DL_TC1ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
			       DL_AFC1ReqTimeOutVal_Default);

		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
			       DL_FC0ProtectionTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
			       DL_TC0ReplayTimeOutVal_Default);
		ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
			       DL_AFC0ReqTimeOutVal_Default);
	}

	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
					 | pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);
	} else {
		ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
					      pwr_mode);

		memcpy(&hba->pwr_info, pwr_mode,
		       sizeof(struct ufs_pa_layer_attr));
	}

	return ret;
}

/**
 * ufshcd_config_pwr_mode - configure a new power mode
 * @hba: per-adapter instance
 * @desired_pwr_mode: desired power configuration
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			   struct ufs_pa_layer_attr *desired_pwr_mode)
{
	struct ufs_pa_layer_attr final_params = { 0 };
	int ret;

	ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
					    desired_pwr_mode, &final_params);

	if (ret)
		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));

	ret = ufshcd_change_power_mode(hba, &final_params);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
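
/*
 * Illustrative sketch (not part of the driver): a hypothetical vendor hook
 * that requests HS-Gear 4, two lanes, Rate B through ufshcd_config_pwr_mode().
 * The gear/lane constants are standard UniPro definitions; the helper itself
 * and the chosen values are examples only.
 */
static int __maybe_unused ufshcd_example_set_hs_g4(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr pwr = {
		.gear_rx = UFS_HS_G4,
		.gear_tx = UFS_HS_G4,
		.lane_rx = 2,
		.lane_tx = 2,
		.pwr_rx = FAST_MODE,
		.pwr_tx = FAST_MODE,
		.hs_rate = PA_HS_MODE_B,
	};

	/* Vendor PRE_CHANGE notification may still adjust these values. */
	return ufshcd_config_pwr_mode(hba, &pwr);
}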

/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int err;
	bool flag_res = true;
	ktime_t timeout;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
	if (err) {
		dev_err(hba->dev,
			"%s: setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* Poll fDeviceInit flag to be cleared */
	timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
	do {
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
		if (!flag_res)
			break;
		usleep_range(500, 1000);
	} while (ktime_before(ktime_get(), timeout));

	if (err) {
		dev_err(hba->dev,
			"%s: reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	} else if (flag_res) {
		dev_err(hba->dev,
			"%s: fDeviceInit was not cleared by the device\n",
			__func__);
		err = -EBUSY;
	}
out:
	return err;
}

/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Enable required interrupts
 * 2. Configure interrupt aggregation
 * 3. Program UTRL and UTMRL base address
 * 4. Configure run-stop-registers
 *
 * Return: 0 on success, non-zero value on failure.
 */
int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	if (ufshcd_is_intr_aggr_allowed(hba))
		ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
	else
		ufshcd_disable_intr_aggr(hba);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
		      REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
	}

	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);

/**
 * ufshcd_hba_stop - Send controller to reset state
 * @hba: per adapter instance
 */
void ufshcd_hba_stop(struct ufs_hba *hba)
{
	unsigned long flags;
	int err;

	/*
	 * Obtain the host lock to prevent that the controller is disabled
	 * while the UFS interrupt handler is active on another CPU.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10, 1);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
EXPORT_SYMBOL_GPL(ufshcd_hba_stop);

/**
 * ufshcd_hba_execute_hce - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int ufshcd_hba_execute_hce(struct ufs_hba *hba)
{
	int retry;

	for (retry = 3; retry > 0; retry--) {
		if (ufshcd_is_hba_active(hba))
			/* change controller state to "reset state" */
			ufshcd_hba_stop(hba);

		/* UniPro link is disabled at this point */
		ufshcd_set_link_off(hba);

		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* start controller initialization sequence */
		ufshcd_hba_start(hba);

		/*
		 * To initialize a UFS host controller HCE bit must be set to 1.
		 * During initialization the HCE bit value changes from 1->0->1.
		 * When the host controller completes initialization sequence
		 * it sets the value of HCE bit to 1. The same HCE bit is read back
		 * to check if the controller has completed initialization sequence.
		 * So without this delay the value HCE = 1, set in the previous
		 * instruction might be read back.
		 * This delay can be changed based on the controller.
		 */
		ufshcd_delay_us(hba->vps->hba_enable_delay_us, 100);

		/* wait for the host controller to complete initialization */
		if (!ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE, CONTROLLER_ENABLE,
					      CONTROLLER_ENABLE, 1000, 50))
			break;

		dev_err(hba->dev, "Enabling the controller failed\n");
	}

	if (!retry)
		return -EIO;

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}

int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int ret;

	if (hba->quirks & UFSHCI_QUIRK_BROKEN_HCE) {
		ufshcd_set_link_off(hba);
		ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);

		/* enable UIC related interrupts */
		ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
		ret = ufshcd_dme_reset(hba);
		if (ret) {
			dev_err(hba->dev, "DME_RESET failed\n");
			return ret;
		}

		ret = ufshcd_dme_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Enabling DME failed\n");
			return ret;
		}

		ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
	} else {
		ret = ufshcd_hba_execute_hce(hba);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_hba_enable);

static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
{
	int tx_lanes = 0, i, err = 0;

	if (!peer)
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
			       &tx_lanes);
	else
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
				    &tx_lanes);
	for (i = 0; i < tx_lanes; i++) {
		if (!peer)
			err = ufshcd_dme_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		else
			err = ufshcd_dme_peer_set(hba,
				UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
					0);
		if (err) {
			dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
				__func__, peer, i, err);
			break;
		}
	}

	return err;
}

static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}

void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
{
	struct ufs_event_hist *e;

	if (id >= UFS_EVT_CNT)
		return;

	e = &hba->ufs_stats.event[id];
	e->val[e->pos] = val;
	e->tstamp[e->pos] = local_clock();
	e->cnt += 1;
	e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;

	ufshcd_vops_event_notify(hba, id, &val);
}
EXPORT_SYMBOL_GPL(ufshcd_update_evt_hist);
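
/*
 * Illustrative sketch (not part of the driver): recording a failure in the
 * event history ring, exactly as the link-startup and error paths below do.
 * The helper name and its error argument are examples only.
 */
static void __maybe_unused ufshcd_example_log_failure(struct ufs_hba *hba,
						      int err)
{
	/* Store the error code under the link-startup failure event id. */
	ufshcd_update_evt_hist(hba, UFS_EVT_LINK_STARTUP_FAIL, (u32)err);
}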

/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Return: 0 for success, non-zero in case of failure.
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = false;

	/*
	 * If the UFS device isn't active then we will have to issue link
	 * startup twice to make sure the device state moves to active.
	 */
	if (!ufshcd_is_ufs_dev_active(hba))
		link_startup_again = true;

link_startup:
	do {
		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       0);
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && retries && ufshcd_hba_enable(hba)) {
			ufshcd_update_evt_hist(hba,
					       UFS_EVT_LINK_STARTUP_FAIL,
					       (u32)ret);
			goto out;
		}
	} while (ret && retries--);

	if (ret) {
		/* failed to get the link up... retire */
		ufshcd_update_evt_hist(hba,
				       UFS_EVT_LINK_STARTUP_FAIL,
				       (u32)ret);
		goto out;
	}

	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);
	ufshcd_print_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	/* Clear UECPA once due to LINERESET has happened during LINK_STARTUP */
	ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret) {
		dev_err(hba->dev, "link startup failed %d\n", ret);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
	}
	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	ufshcd_dev_man_lock(hba);

	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  hba->nop_out_timeout);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	ufshcd_dev_man_unlock(hba);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}

/**
 * ufshcd_setup_links - associate link b/w device wlun and other luns
 * @sdev: pointer to SCSI device
 * @hba: pointer to ufs hba
 */
static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev)
{
	struct device_link *link;

	/*
	 * Device wlun is the supplier & rest of the luns are consumers.
	 * This ensures that device wlun suspends after all other luns.
	 */
	if (hba->ufs_device_wlun) {
		link = device_link_add(&sdev->sdev_gendev,
				       &hba->ufs_device_wlun->sdev_gendev,
				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
		if (!link) {
			dev_err(&sdev->sdev_gendev, "Failed establishing link - %s\n",
				dev_name(&hba->ufs_device_wlun->sdev_gendev));
			return;
		}
		hba->luns_avail--;
		/* Ignore REPORT_LUN wlun probing */
		if (hba->luns_avail == 1) {
			ufshcd_rpm_put(hba);
			return;
		}
	} else {
		/*
		 * Device wlun is probed. The assumption is that WLUNs are
		 * scanned before other LUNs.
		 */
		hba->luns_avail--;
	}
}

/**
 * ufshcd_lu_init - Initialize the relevant parameters of the LU
 * @hba: per-adapter instance
 * @sdev: pointer to SCSI device
 */
static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev)
{
	int len = QUERY_DESC_MAX_SIZE;
	u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
	u8 lun_qdepth = hba->nutrs;
	u8 *desc_buf;
	int ret;

	desc_buf = kzalloc(len, GFP_KERNEL);
	if (!desc_buf)
		goto set_qdepth;

	ret = ufshcd_read_unit_desc_param(hba, lun, 0, desc_buf, len);
	if (ret < 0) {
		if (ret == -EOPNOTSUPP)
			/* If LU doesn't support unit descriptor, its queue depth is set to 1 */
			lun_qdepth = 1;
		kfree(desc_buf);
		goto set_qdepth;
	}

	if (desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH]) {
		/*
		 * In per-LU queueing architecture, bLUQueueDepth will not be 0, then we will
		 * use the smaller between UFSHCI CAP.NUTRS and UFS LU bLUQueueDepth
		 */
		lun_qdepth = min_t(int, desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH], hba->nutrs);
	}
	/*
	 * According to UFS device specification, the write protection mode is only supported by
	 * normal LU, not supported by WLUN.
	 */
	if (hba->dev_info.f_power_on_wp_en && lun < hba->dev_info.max_lu_supported &&
	    !hba->dev_info.is_lu_power_on_wp &&
	    desc_buf[UNIT_DESC_PARAM_LU_WR_PROTECT] == UFS_LU_POWER_ON_WP)
		hba->dev_info.is_lu_power_on_wp = true;

	/* In case of RPMB LU, check if advanced RPMB mode is enabled */
	if (desc_buf[UNIT_DESC_PARAM_UNIT_INDEX] == UFS_UPIU_RPMB_WLUN &&
	    desc_buf[RPMB_UNIT_DESC_PARAM_REGION_EN] & BIT(4))
		hba->dev_info.b_advanced_rpmb_en = true;

	kfree(desc_buf);
set_qdepth:
	/*
	 * For WLUNs that don't support unit descriptor, queue depth is set to 1. For LUs whose
	 * bLUQueueDepth == 0, the queue depth is set to a maximum value that host can queue.
	 */
	dev_dbg(hba->dev, "Set LU %x queue depth %d\n", lun, lun_qdepth);
	scsi_change_queue_depth(sdev, lun_qdepth);
}

/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Return: success.
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;

	/* DBD field should be set to 1 in mode sense(10) */
	sdev->set_dbd_for_ms = 1;

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	/* WRITE_SAME command is not supported */
	sdev->no_write_same = 1;

	ufshcd_lu_init(hba, sdev);

	ufshcd_setup_links(hba, sdev);

	return 0;
}

/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 *
 * Change queue depth and make sure the max. limits are not crossed.
 *
 * Return: new queue depth.
 */
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
	return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}

/**
 * ufshcd_device_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 * @lim: queue limits
 *
 * Return: 0 (success).
 */
static int ufshcd_device_configure(struct scsi_device *sdev,
				   struct queue_limits *lim)
{
	struct ufs_hba *hba = shost_priv(sdev->host);
	struct request_queue *q = sdev->request_queue;

	lim->dma_pad_mask = PRDT_DATA_BYTE_COUNT_PAD - 1;

	/*
	 * Block runtime-pm until all consumers are added.
	 * Refer ufshcd_setup_links().
	 */
	if (is_device_wlun(sdev))
		pm_runtime_get_noresume(&sdev->sdev_gendev);
	else if (ufshcd_is_rpm_autosuspend_allowed(hba))
		sdev->rpm_autosuspend = 1;
	/*
	 * Do not print messages during runtime PM to avoid never-ending cycles
	 * of messages written back to storage by user space causing runtime
	 * resume, causing more messages and so on.
	 */
	sdev->silence_suspend = 1;

	if (hba->vops && hba->vops->config_scsi_dev)
		hba->vops->config_scsi_dev(sdev);

	ufshcd_crypto_register(hba, q);

	return 0;
}

/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;
	unsigned long flags;

	hba = shost_priv(sdev->host);

	/* Drop the reference as it won't be needed anymore */
	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->ufs_device_wlun = NULL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	} else if (hba->ufs_device_wlun) {
		struct device *supplier = NULL;

		/* Ensure UFS Device WLUN exists and does not disappear */
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (hba->ufs_device_wlun) {
			supplier = &hba->ufs_device_wlun->sdev_gendev;
			get_device(supplier);
		}
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		if (supplier) {
			/*
			 * If a LUN fails to probe (e.g. absent BOOT WLUN), the
			 * device will not have been registered but can still
			 * have a device link holding a reference to the device.
			 */
			device_link_remove(&sdev->sdev_gendev, supplier);
			put_device(supplier);
		}
	}
}

/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Return: value based on SCSI command status.
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		fallthrough;
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 | scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}

/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 * @cqe: pointer to the completion queue entry
 *
 * Return: result of the command to notify SCSI midlayer.
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
			   struct cq_entry *cqe)
{
	int result = 0;
	int scsi_status;
	enum utp_ocs ocs;
	u8 upiu_flags;
	u32 resid;

	upiu_flags = lrbp->ucd_rsp_ptr->header.flags;
	resid = be32_to_cpu(lrbp->ucd_rsp_ptr->sr.residual_transfer_count);
	/*
	 * Test !overflow instead of underflow to support UFS devices that do
	 * not set either flag.
	 */
	if (resid && !(upiu_flags & UPIU_RSP_FLAG_OVERFLOW))
		scsi_set_resid(lrbp->cmd, resid);

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp, cqe);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR) {
		if (lrbp->ucd_rsp_ptr->header.response ||
		    lrbp->ucd_rsp_ptr->header.status)
			ocs = OCS_SUCCESS;
	}

	switch (ocs) {
	case OCS_SUCCESS:
		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
		switch (ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr)) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = lrbp->ucd_rsp_ptr->header.status;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			/*
			 * Currently we are only supporting BKOPs exception
			 * events hence we can ignore BKOPs exception event
			 * during power management callbacks. BKOPs exception
			 * event is not expected to be raised in runtime suspend
			 * callback as it allows the urgent bkops.
			 * During system suspend, we are anyway forcefully
			 * disabling the bkops and if urgent bkops is needed
			 * it will be enabled on system resume. Long term
			 * solution could be to abort the system suspend if
			 * UFS device needs urgent BKOPs.
			 */
			if (!hba->pm_op_in_progress &&
			    !ufshcd_eh_in_progress(hba) &&
			    ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				/* Flushed in suspend */
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		dev_warn(hba->dev,
			 "OCS %s from controller for tag %d\n",
			 (ocs == OCS_ABORTED ? "aborted" : "invalid"),
			 lrbp->task_tag);
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	case OCS_DEVICE_FATAL_ERROR:
	case OCS_INVALID_CRYPTO_CONFIG:
	case OCS_GENERAL_CRYPTO_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x for tag %d\n",
			ocs, lrbp->task_tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		break;
	} /* end of switch */

	if ((host_byte(result) != DID_OK) &&
	    (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs)
		ufshcd_print_tr(hba, lrbp->task_tag, true);
	return result;
}

static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba,
					 u32 intr_mask)
{
	if (!ufshcd_is_auto_hibern8_supported(hba) ||
	    !ufshcd_is_auto_hibern8_enabled(hba))
		return false;

	if (!(intr_mask & UFSHCD_UIC_HIBERN8_MASK))
		return false;

	if (hba->active_uic_cmd &&
	    (hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_ENTER ||
	     hba->active_uic_cmd->command == UIC_CMD_DME_HIBER_EXIT))
		return false;

	return true;
}

/**
 * ufshcd_uic_cmd_compl - handle completion of uic command
 * @hba: per adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;
	struct uic_command *cmd;

	spin_lock(hba->host->host_lock);
	cmd = hba->active_uic_cmd;
	if (WARN_ON_ONCE(!cmd))
		goto unlock;

	if (ufshcd_is_auto_hibern8_error(hba, intr_status))
		hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status);

	if (intr_status & UIC_COMMAND_COMPL) {
		cmd->argument2 |= ufshcd_get_uic_cmd_result(hba);
		cmd->argument3 = ufshcd_get_dme_attr_val(hba);
		if (!hba->uic_async_done)
			cmd->cmd_active = 0;
		complete(&cmd->done);
		retval = IRQ_HANDLED;
	}

	if (intr_status & UFSHCD_UIC_PWR_MASK && hba->uic_async_done) {
		cmd->cmd_active = 0;
		complete(hba->uic_async_done);
		retval = IRQ_HANDLED;
	}

	if (retval == IRQ_HANDLED)
		ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);

unlock:
	spin_unlock(hba->host->host_lock);

	return retval;
}

/* Release the resources allocated for processing a SCSI command. */
void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
			     struct ufshcd_lrb *lrbp)
{
	struct scsi_cmnd *cmd = lrbp->cmd;

	scsi_dma_unmap(cmd);
	ufshcd_crypto_clear_prdt(hba, lrbp);
	ufshcd_release(hba);
	ufshcd_clk_scaling_update_busy(hba);
}

/**
 * ufshcd_compl_one_cqe - handle a completion queue entry
 * @hba: per adapter instance
 * @task_tag: the task tag of the request to be completed
 * @cqe: pointer to the completion queue entry
 */
void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
			  struct cq_entry *cqe)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	enum utp_ocs ocs;

	lrbp = &hba->lrb[task_tag];
	lrbp->compl_time_stamp = ktime_get();
	cmd = lrbp->cmd;
	if (cmd) {
		if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
			ufshcd_update_monitor(hba, lrbp);
		ufshcd_add_command_trace(hba, task_tag, UFS_CMD_COMP);
		cmd->result = ufshcd_transfer_rsp_status(hba, lrbp, cqe);
		ufshcd_release_scsi_cmd(hba, lrbp);
		/* Do not touch lrbp after scsi done */
		scsi_done(cmd);
	} else if (hba->dev_cmd.complete) {
		if (cqe) {
			ocs = le32_to_cpu(cqe->status) & MASK_OCS;
			lrbp->utr_descriptor_ptr->header.ocs = ocs;
		}
		complete(hba->dev_cmd.complete);
	}
}

/**
 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 * @completed_reqs: bitmask that indicates which requests to complete
 */
static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
					unsigned long completed_reqs)
{
	int tag;

	for_each_set_bit(tag, &completed_reqs, hba->nutrs)
		ufshcd_compl_one_cqe(hba, tag, NULL);
}

/* Any value that is not an existing queue number is fine for this constant. */
enum {
	UFSHCD_POLL_FROM_INTERRUPT_CONTEXT = -1
};

static void ufshcd_clear_polled(struct ufs_hba *hba,
				unsigned long *completed_reqs)
{
	int tag;

	for_each_set_bit(tag, completed_reqs, hba->nutrs) {
		struct scsi_cmnd *cmd = hba->lrb[tag].cmd;

		if (!cmd)
			continue;
		if (scsi_cmd_to_rq(cmd)->cmd_flags & REQ_POLLED)
			__clear_bit(tag, completed_reqs);
	}
}

/*
 * Return: > 0 if one or more commands have been completed or 0 if no
 * requests have been completed.
 */
static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct ufs_hba *hba = shost_priv(shost);
	unsigned long completed_reqs, flags;
	u32 tr_doorbell;
	struct ufs_hw_queue *hwq;

	if (hba->mcq_enabled) {
		hwq = &hba->uhq[queue_num];

		return ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	spin_lock_irqsave(&hba->outstanding_lock, flags);
	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
	WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
		  "completed: %#lx; outstanding: %#lx\n", completed_reqs,
		  hba->outstanding_reqs);
	if (queue_num == UFSHCD_POLL_FROM_INTERRUPT_CONTEXT) {
		/* Do not complete polled requests from interrupt context. */
		ufshcd_clear_polled(hba, &completed_reqs);
	}
	hba->outstanding_reqs &= ~completed_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (completed_reqs)
		__ufshcd_transfer_req_compl(hba, completed_reqs);

	return completed_reqs != 0;
}

/**
 * ufshcd_mcq_compl_pending_transfer - MCQ mode function. It is
 * invoked from the error handler context or ufshcd_host_reset_and_restore()
 * to complete the pending transfers and free the resources associated with
 * the SCSI commands.
 *
 * @hba: per adapter instance
 * @force_compl: This flag is set to true when invoked
 * from ufshcd_host_reset_and_restore() in which case it requires special
 * handling because the host controller has been reset by ufshcd_hba_stop().
 */
static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
					      bool force_compl)
{
	struct ufs_hw_queue *hwq;
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long flags;
	int tag;

	for (tag = 0; tag < hba->nutrs; tag++) {
		lrbp = &hba->lrb[tag];
		cmd = lrbp->cmd;
		if (!ufshcd_cmd_inflight(cmd) ||
		    test_bit(SCMD_STATE_COMPLETE, &cmd->state))
			continue;

		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
		if (!hwq)
			continue;

		if (force_compl) {
			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
			/*
			 * For those cmds of which the cqes are not present
			 * in the cq, complete them explicitly.
			 */
			spin_lock_irqsave(&hwq->cq_lock, flags);
			if (cmd && !test_bit(SCMD_STATE_COMPLETE, &cmd->state)) {
				set_host_byte(cmd, DID_REQUEUE);
				ufshcd_release_scsi_cmd(hba, lrbp);
				scsi_done(cmd);
			}
			spin_unlock_irqrestore(&hwq->cq_lock, flags);
		} else {
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
		}
	}
}

/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	/* Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	if (ufshcd_is_intr_aggr_allowed(hba) &&
	    !(hba->quirks & UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR))
		ufshcd_reset_intr_aggr(hba);

	if (ufs_fail_completion(hba))
		return IRQ_HANDLED;

	/*
	 * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
	 * do not want polling to trigger spurious interrupt complaints.
	 */
	ufshcd_poll(hba->host, UFSHCD_POLL_FROM_INTERRUPT_CONTEXT);

	return IRQ_HANDLED;
}

int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
				       QUERY_ATTR_IDN_EE_CONTROL, 0, 0,
				       &ee_ctrl_mask);
}

int ufshcd_write_ee_control(struct ufs_hba *hba)
{
	int err;

	mutex_lock(&hba->ee_ctrl_mutex);
	err = __ufshcd_write_ee_control(hba, hba->ee_ctrl_mask);
	mutex_unlock(&hba->ee_ctrl_mutex);
	if (err)
		dev_err(hba->dev, "%s: failed to write ee control %d\n",
			__func__, err);
	return err;
}

int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
			     const u16 *other_mask, u16 set, u16 clr)
{
	u16 new_mask, ee_ctrl_mask;
	int err = 0;

	mutex_lock(&hba->ee_ctrl_mutex);
	new_mask = (*mask & ~clr) | set;
	ee_ctrl_mask = new_mask | *other_mask;
	if (ee_ctrl_mask != hba->ee_ctrl_mask)
		err = __ufshcd_write_ee_control(hba, ee_ctrl_mask);
	/* Still need to update 'mask' even if 'ee_ctrl_mask' was unchanged */
	if (!err) {
		hba->ee_ctrl_mask = ee_ctrl_mask;
		*mask = new_mask;
	}
	mutex_unlock(&hba->ee_ctrl_mutex);
	return err;
}

/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static inline int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, 0, mask);
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static inline int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	return ufshcd_update_ee_drv_mask(hba, mask, 0);
}
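
/*
 * Illustrative sketch (not part of the driver): a hypothetical helper that
 * subscribes the driver to the urgent-BKOPS and temperature exception events
 * through the ufshcd_enable_ee()/ufshcd_update_ee_drv_mask() path used below.
 * The helper name is an example only.
 */
static int __maybe_unused ufshcd_example_enable_exceptions(struct ufs_hba *hba)
{
	int err;

	/* Ask the device to raise the urgent BKOPS exception event. */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		return err;

	/* Also subscribe to the urgent temperature exception event. */
	return ufshcd_enable_ee(hba, MASK_EE_URGENT_TEMP);
}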

/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
				      QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
			__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
			__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device in doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has drawback of device moving into critical state where the device is
 * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
			__func__, err);
		goto out;
	}

	err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
				      QUERY_FLAG_IDN_BKOPS_EN, 0, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
			__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
	hba->is_urgent_bkops_lvl_checked = false;
out:
	return err;
}

/**
 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. This function would change the auto-bkops state based on
 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
		hba->auto_bkops_enabled = false;
		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
		ufshcd_enable_auto_bkops(hba);
	} else {
		hba->auto_bkops_enabled = true;
		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
		ufshcd_disable_auto_bkops(hba);
	}
	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
	hba->is_urgent_bkops_lvl_checked = false;
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
 * @hba: per-adapter instance
 *
 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
 * flag in the device to permit background operations if the device
 * bkops_status is greater than or equal to the "hba->urgent_bkops_lvl",
 * disable otherwise.
 *
 * Return: 0 for success, non-zero in case of failure.
 *
 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
 * to know whether auto bkops is enabled or disabled after this function
 * returns control to it.
 */
static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
{
	enum bkops_status status = hba->urgent_bkops_lvl;
	u32 curr_status = 0;
	int err;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
			__func__, err);
		goto out;
	} else if (curr_status > BKOPS_STATUS_MAX) {
		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
			__func__, curr_status);
		err = -EINVAL;
		goto out;
	}

	if (curr_status >= status)
		err = ufshcd_enable_auto_bkops(hba);
	else
		err = ufshcd_disable_auto_bkops(hba);
out:
	return err;
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}

static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
{
	int err;
	u32 curr_status = 0;

	if (hba->is_urgent_bkops_lvl_checked)
		goto enable_auto_bkops;

	err = ufshcd_get_bkops_status(hba, &curr_status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * We are seeing that some devices are raising the urgent bkops
	 * exception events even when BKOPS status doesn't indicate performance
	 * impacted or critical. Handle these devices by determining their
	 * urgent bkops status at runtime.
	 */
	if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
		dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
			__func__, curr_status);
		/* update the current status as the urgent bkops level */
		hba->urgent_bkops_lvl = curr_status;
		hba->is_urgent_bkops_lvl_checked = true;
	}

enable_auto_bkops:
	err = ufshcd_enable_auto_bkops(hba);
out:
	if (err < 0)
		dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
			__func__, err);
}

static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
{
	u32 value;

	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				    QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
		return;

	dev_info(hba->dev, "exception Tcase %d\n", value - 80);

	ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);

	/*
	 * A placeholder for the platform vendors to add whatever additional
	 * steps are needed here.
	 */
}

static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
	u8 index;
	enum query_opcode opcode = set ? UPIU_QUERY_OPCODE_SET_FLAG :
				   UPIU_QUERY_OPCODE_CLEAR_FLAG;

	index = ufshcd_wb_get_query_index(hba);
	return ufshcd_query_flag_retry(hba, opcode, idn, index, NULL);
}

int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_EN);
	if (ret) {
		dev_err(hba->dev, "%s: Write Booster %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_enabled = enable;
	dev_dbg(hba->dev, "%s: Write Booster %s\n",
		__func__, enable ? "enabled" : "disabled");

	return ret;
}

static void ufshcd_wb_toggle_buf_flush_during_h8(struct ufs_hba *hba,
						 bool enable)
{
	int ret;

	ret = __ufshcd_wb_toggle(hba, enable,
			QUERY_FLAG_IDN_WB_BUFF_FLUSH_DURING_HIBERN8);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush during H8 %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return;
	}
	dev_dbg(hba->dev, "%s: WB-Buf Flush during H8 %s\n",
		__func__, enable ? "enabled" : "disabled");
}

int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable)
{
	int ret;

	if (!ufshcd_is_wb_allowed(hba) ||
	    hba->dev_info.wb_buf_flush_enabled == enable)
		return 0;

	ret = __ufshcd_wb_toggle(hba, enable, QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN);
	if (ret) {
		dev_err(hba->dev, "%s: WB-Buf Flush %s failed %d\n",
			__func__, enable ? "enabling" : "disabling", ret);
		return ret;
	}

	hba->dev_info.wb_buf_flush_enabled = enable;
	dev_dbg(hba->dev, "%s: WB-Buf Flush %s\n",
		__func__, enable ? "enabled" : "disabled");

	return ret;
}

static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
						u32 avail_buf)
{
	u32 cur_buf;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE,
				      index, 0, &cur_buf);
	if (ret) {
		dev_err(hba->dev, "%s: dCurWriteBoosterBufferSize read failed %d\n",
			__func__, ret);
		return false;
	}

	if (!cur_buf) {
		dev_info(hba->dev, "dCurWBBuf: %d WB disabled until free-space is available\n",
			 cur_buf);
		return false;
	}
	/* Let it continue to flush when available buffer exceeds threshold */
	return avail_buf < hba->vps->wb_flush_threshold;
}

static void ufshcd_wb_force_disable(struct ufs_hba *hba)
{
	if (ufshcd_is_wb_buf_flush_allowed(hba))
		ufshcd_wb_toggle_buf_flush(hba, false);

	ufshcd_wb_toggle_buf_flush_during_h8(hba, false);
	ufshcd_wb_toggle(hba, false);
	hba->caps &= ~UFSHCD_CAP_WB_EN;

	dev_info(hba->dev, "%s: WB force disabled\n", __func__);
}

static bool ufshcd_is_wb_buf_lifetime_available(struct ufs_hba *hba)
{
	u32 lifetime;
	int ret;
	u8 index;

	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST,
				      index, 0, &lifetime);
	if (ret) {
		dev_err(hba->dev,
			"%s: bWriteBoosterBufferLifeTimeEst read failed %d\n",
			__func__, ret);
		return false;
	}

	if (lifetime == UFS_WB_EXCEED_LIFETIME) {
		dev_err(hba->dev, "%s: WB buf lifetime is exhausted 0x%02X\n",
			__func__, lifetime);
		return false;
	}

	dev_dbg(hba->dev, "%s: WB buf lifetime is 0x%02X\n",
		__func__, lifetime);

	return true;
}

static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
{
	int ret;
	u32 avail_buf;
	u8 index;

	if (!ufshcd_is_wb_allowed(hba))
		return false;

	if (!ufshcd_is_wb_buf_lifetime_available(hba)) {
		ufshcd_wb_force_disable(hba);
		return false;
	}

	/*
	 * The ufs device needs the vcc to be ON to flush.
	 * With user-space reduction enabled, it's enough to enable flush
	 * by checking only the available buffer. The threshold
	 * defined here is > 90% full.
	 * With user-space preserved enabled, the current-buffer
	 * should be checked too because the wb buffer size can reduce
	 * when disk tends to be full. This info is provided by current
	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
	 * keeping vcc on when current buffer is empty.
	 */
	index = ufshcd_wb_get_query_index(hba);
	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE,
				      index, 0, &avail_buf);
	if (ret) {
		dev_warn(hba->dev, "%s: dAvailableWriteBoosterBufferSize read failed %d\n",
			 __func__, ret);
		return false;
	}

	if (!hba->dev_info.b_presrv_uspc_en)
		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);

	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
}

static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(to_delayed_work(work),
					   struct ufs_hba,
					   rpm_dev_flush_recheck_work);
	/*
	 * To prevent unnecessary VCC power drain after device finishes
	 * WriteBooster buffer flush or Auto BKOPs, force runtime resume
	 * after a certain delay to recheck the threshold by next runtime
	 * suspend.
	 */
	ufshcd_rpm_get_sync(hba);
	ufshcd_rpm_put_sync(hba);
}

/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
			__func__, err);
		return;
	}

	trace_ufshcd_exception_event(dev_name(hba->dev), status);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_BKOPS)
		ufshcd_bkops_exception_event_handler(hba);

	if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
		ufshcd_temp_exception_event_handler(hba, status);

	ufs_debugfs_exception_event(hba, status);
}

/* Complete requests that have door-bell cleared */
static void ufshcd_complete_requests(struct ufs_hba *hba, bool force_compl)
{
	if (hba->mcq_enabled)
		ufshcd_mcq_compl_pending_transfer(hba, force_compl);
	else
		ufshcd_transfer_req_compl(hba);

	ufshcd_tmc_handler(hba);
}

/**
 * ufshcd_quirk_dl_nac_errors - This function checks if error handling is
 *				to recover from the DL NAC errors or not.
 * @hba: per-adapter instance
 *
 * Return: true if error handling is required, false otherwise.
 */
static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
{
	unsigned long flags;
	bool err_handling = true;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
	 * device fatal error and/or DL NAC & REPLAY timeout errors.
	 */
	if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
		goto out;

	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
		goto out;

	if ((hba->saved_err & UIC_ERROR) &&
	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
		int err;
		/*
		 * wait for 50ms to see if we can get any other errors or not.
		 */
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		msleep(50);
		spin_lock_irqsave(hba->host->host_lock, flags);

		/*
		 * now check if we have got any other severe errors other than
		 * DL NAC error?
		 */
		if ((hba->saved_err & INT_FATAL_ERRORS) ||
		    ((hba->saved_err & UIC_ERROR) &&
		     (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
			goto out;

		/*
		 * As DL NAC is the only error received so far, send out NOP
		 * command to confirm if link is still active or not.
		 *   - If we don't get any response then do error recovery.
		 *   - If we get response then clear the DL NAC error bit.
		 */

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_verify_dev_init(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);

		if (err)
			goto out;

		/* Link seems to be alive hence ignore the DL NAC errors */
		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
			hba->saved_err &= ~UIC_ERROR;
		/* clear NAC error */
		hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
		if (!hba->saved_uic_err)
			err_handling = false;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err_handling;
}

/* host lock must be held before calling this func */
static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
{
	return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
	       (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
}

void ufshcd_schedule_eh_work(struct ufs_hba *hba)
{
	lockdep_assert_held(hba->host->host_lock);

	/* handle fatal errors only when link is not in error state */
	if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (hba->force_reset || ufshcd_is_link_broken(hba) ||
		    ufshcd_is_saved_err_fatal(hba))
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
		else
			hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
		queue_work(hba->eh_wq, &hba->eh_work);
	}
}

static void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
	spin_lock_irq(hba->host->host_lock);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	spin_unlock_irq(hba->host->host_lock);
}

static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
	mutex_lock(&hba->wb_mutex);
	down_write(&hba->clk_scaling_lock);
	hba->clk_scaling.is_allowed = allow;
	up_write(&hba->clk_scaling_lock);
	mutex_unlock(&hba->wb_mutex);
}

static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
{
	if (suspend) {
		if (hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	} else {
		ufshcd_clk_scaling_allow(hba, true);
		if (hba->clk_scaling.is_enabled)
			ufshcd_resume_clkscaling(hba);
	}
}
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
	ufshcd_rpm_get_sync(hba);
	if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
	    hba->is_sys_suspended) {
		enum ufs_pm_op pm_op;

		/*
		 * Don't assume anything of resume, if
		 * resume fails, irq and clocks can be OFF, and powers
		 * can be OFF or in LPM.
		 */
		ufshcd_setup_hba_vreg(hba, true);
		ufshcd_enable_irq(hba);
		ufshcd_setup_vreg(hba, true);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
		ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
		ufshcd_hold(hba);
		if (!ufshcd_is_clkgating_allowed(hba))
			ufshcd_setup_clocks(hba, true);
		pm_op = hba->is_sys_suspended ? UFS_SYSTEM_PM : UFS_RUNTIME_PM;
		ufshcd_vops_resume(hba, pm_op);
	} else {
		ufshcd_hold(hba);
		if (ufshcd_is_clkscaling_supported(hba) &&
		    hba->clk_scaling.is_enabled)
			ufshcd_suspend_clkscaling(hba);
		ufshcd_clk_scaling_allow(hba, false);
	}
	/* Wait for ongoing ufshcd_queuecommand() calls to finish. */
	blk_mq_quiesce_tagset(&hba->host->tag_set);
	cancel_work_sync(&hba->eeh_work);
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
	blk_mq_unquiesce_tagset(&hba->host->tag_set);
	ufshcd_release(hba);

	if (ufshcd_is_clkscaling_supported(hba))
		ufshcd_clk_scaling_suspend(hba, false);
	ufshcd_rpm_put(hba);
}

static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
	return (!hba->is_powered || hba->shutting_down ||
		!hba->ufs_device_wlun ||
		hba->ufshcd_state == UFSHCD_STATE_ERROR ||
		(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
		   ufshcd_is_link_broken(hba))));
}
#ifdef CONFIG_PM
static void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
	struct Scsi_Host *shost = hba->host;
	struct scsi_device *sdev;
	struct request_queue *q;
	int ret;

	hba->is_sys_suspended = false;
	/*
	 * Set RPM status of wlun device to RPM_ACTIVE,
	 * this also clears its runtime error.
	 */
	ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);

	/* hba device might have a runtime error otherwise */
	if (!ret)
		ret = pm_runtime_set_active(hba->dev);
	/*
	 * If wlun device had runtime error, we also need to resume those
	 * consumer scsi devices in case any of them has failed to be
	 * resumed due to supplier runtime resume failure. This is to unblock
	 * blk_queue_enter in case there are bios waiting inside it.
	 */
	if (!ret) {
		shost_for_each_device(sdev, shost) {
			q = sdev->request_queue;
			if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
				       q->rpm_status == RPM_SUSPENDING))
				pm_request_resume(q->dev);
		}
	}
}
#else
static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
}
#endif
static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
{
	struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
	u32 mode;

	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);

	if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
		return true;

	if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
		return true;

	return false;
}
static bool ufshcd_abort_one(struct request *rq, void *priv)
{
	int *ret = priv;
	u32 tag = rq->tag;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	struct ufs_hba *hba = shost_priv(shost);

	*ret = ufshcd_try_to_abort_task(hba, tag);
	dev_err(hba->dev, "Aborting tag %d / CDB %#02x %s\n", tag,
		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->cmnd[0] : -1,
		*ret ? "failed" : "succeeded");

	return *ret == 0;
}
/**
 * ufshcd_abort_all - Abort all pending commands.
 * @hba: Host bus adapter pointer.
 *
 * Return: true if and only if the host controller needs to be reset.
 */
static bool ufshcd_abort_all(struct ufs_hba *hba)
{
	int tag, ret = 0;

	blk_mq_tagset_busy_iter(&hba->host->tag_set, ufshcd_abort_one, &ret);
	if (ret)
		goto out;

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
		ret = ufshcd_clear_tm_cmd(hba, tag);
		if (ret)
			goto out;
	}

out:
	/* Complete the requests that are cleared by s/w */
	ufshcd_complete_requests(hba, false);

	return ret != 0;
}
/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	int retries = MAX_ERR_HANDLER_RETRIES;
	struct ufs_hba *hba;
	unsigned long flags;
	bool needs_restore;
	bool needs_reset;
	int pmc_err;

	hba = container_of(work, struct ufs_hba, eh_work);

	dev_info(hba->dev,
		 "%s started; HBA state %s; powered %d; shutting down %d; saved_err = %d; saved_uic_err = %d; force_reset = %d%s\n",
		 __func__, ufshcd_state_name[hba->ufshcd_state],
		 hba->is_powered, hba->shutting_down, hba->saved_err,
		 hba->saved_uic_err, hba->force_reset,
		 ufshcd_is_link_broken(hba) ? "; link is broken" : "");

	down(&hba->host_sem);
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ufshcd_err_handling_should_stop(hba)) {
		if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		up(&hba->host_sem);
		return;
	}
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_prepare(hba);
	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_complete_requests(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
again:
	needs_restore = false;
	needs_reset = false;

	if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
		hba->ufshcd_state = UFSHCD_STATE_RESET;
	/*
	 * A full reset and restore might have happened after preparation
	 * is finished, double check whether we should stop.
	 */
	if (ufshcd_err_handling_should_stop(hba))
		goto skip_err_handling;

	if ((hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) &&
	    !hba->force_reset) {
		bool ret;

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
		ret = ufshcd_quirk_dl_nac_errors(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!ret && ufshcd_err_handling_should_stop(hba))
			goto skip_err_handling;
	}

	if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
	    (hba->saved_uic_err &&
	     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
		bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_tmrs(hba, hba->outstanding_tasks);
		ufshcd_print_trs_all(hba, pr_prdt);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

	/*
	 * if host reset is required then skip clearing the pending
	 * transfers forcefully because they will get cleared during
	 * host reset and restore
	 */
	if (hba->force_reset || ufshcd_is_link_broken(hba) ||
	    ufshcd_is_saved_err_fatal(hba) ||
	    ((hba->saved_err & UIC_ERROR) &&
	     (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
				    UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))) {
		needs_reset = true;
		goto do_reset;
	}

	/*
	 * If LINERESET was caught, UFS might have been put to PWM mode,
	 * check if power mode restore is needed.
	 */
	if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
		hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		if (!hba->saved_uic_err)
			hba->saved_err &= ~UIC_ERROR;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (ufshcd_is_pwr_mode_restore_needed(hba))
			needs_restore = true;
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!hba->saved_err && !needs_restore)
			goto skip_err_handling;
	}

	hba->silence_err_logs = true;
	/* release lock as clear command might sleep */
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	needs_reset = ufshcd_abort_all(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->silence_err_logs = false;
	if (needs_reset)
		goto do_reset;

	/*
	 * After all reqs and tasks are cleared from doorbell,
	 * now it is safe to retore power mode.
	 */
	if (needs_restore) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		/*
		 * Hold the scaling lock just in case dev cmds
		 * are sent via bsg and/or sysfs.
		 */
		down_write(&hba->clk_scaling_lock);
		hba->force_pmc = true;
		pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
		if (pmc_err) {
			needs_reset = true;
			dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
					__func__, pmc_err);
		}
		hba->force_pmc = false;
		ufshcd_print_pwr_info(hba);
		up_write(&hba->clk_scaling_lock);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

do_reset:
	/* Fatal errors need reset */
	if (needs_reset) {
		int err;

		hba->force_reset = false;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		err = ufshcd_reset_and_restore(hba);
		if (err)
			dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
					__func__, err);
		else
			ufshcd_recover_pm_error(hba);
		spin_lock_irqsave(hba->host->host_lock, flags);
	}

skip_err_handling:
	if (!needs_reset) {
		if (hba->ufshcd_state == UFSHCD_STATE_RESET)
			hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
		if (hba->saved_err || hba->saved_uic_err)
			dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
			    __func__, hba->saved_err, hba->saved_uic_err);
	}
	/* Exit in an operational state or dead */
	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
	    hba->ufshcd_state != UFSHCD_STATE_ERROR) {
		if (--retries)
			goto again;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_err_handling_unprepare(hba);
	up(&hba->host_sem);

	dev_info(hba->dev, "%s finished; HBA state %s\n", __func__,
		 ufshcd_state_name[hba->ufshcd_state]);
}
/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;
	irqreturn_t retval = IRQ_NONE;

	/* PHY layer error */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
	    (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_PA_ERR, reg);
		/*
		 * To know whether this error is fatal or not, DB timeout
		 * must be checked but this error is handled separately.
		 */
		if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
			dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
					__func__);

		/* Got a LINERESET indication. */
		if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
			struct uic_command *cmd = NULL;

			hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
			if (hba->uic_async_done && hba->active_uic_cmd)
				cmd = hba->active_uic_cmd;
			/*
			 * Ignore the LINERESET during power mode change
			 * operation via DME_SET command.
			 */
			if (cmd && (cmd->command == UIC_CMD_DME_SET))
				hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
	    (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DL_ERR, reg);

		if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
			hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
		else if (hba->dev_quirks &
				UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
			if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
				hba->uic_error |=
					UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
			else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
				hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
		}
		retval |= IRQ_HANDLED;
	}

	/* UIC NL/TL/DME errors needs software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if ((reg & UIC_NETWORK_LAYER_ERROR) &&
	    (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_NL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
	    (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_TL_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
		retval |= IRQ_HANDLED;
	}

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if ((reg & UIC_DME_ERROR) &&
	    (reg & UIC_DME_ERROR_CODE_MASK)) {
		ufshcd_update_evt_hist(hba, UFS_EVT_DME_ERR, reg);
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
		retval |= IRQ_HANDLED;
	}

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
	return retval;
}
/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 * @intr_status: interrupt status generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
{
	bool queue_eh_work = false;
	irqreturn_t retval = IRQ_NONE;

	spin_lock(hba->host->host_lock);
	hba->errors |= UFSHCD_ERROR_MASK & intr_status;

	if (hba->errors & INT_FATAL_ERRORS) {
		ufshcd_update_evt_hist(hba, UFS_EVT_FATAL_ERR,
				       hba->errors);
		queue_eh_work = true;
	}

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		retval = ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (hba->errors & UFSHCD_UIC_HIBERN8_MASK) {
		dev_err(hba->dev,
			"%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
			__func__, (hba->errors & UIC_HIBERNATE_ENTER) ?
			"Enter" : "Exit",
			hba->errors, ufshcd_get_upmcrs(hba));
		ufshcd_update_evt_hist(hba, UFS_EVT_AUTO_HIBERN8_ERR,
				       hba->errors);
		ufshcd_set_link_broken(hba);
		queue_eh_work = true;
	}

	if (queue_eh_work) {
		/*
		 * update the transfer error masks to sticky bits, let's do this
		 * irrespective of current ufshcd_state.
		 */
		hba->saved_err |= hba->errors;
		hba->saved_uic_err |= hba->uic_error;

		/* dump controller state before resetting */
		if ((hba->saved_err &
		     (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
		    (hba->saved_uic_err &&
		     (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
			dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
					__func__, hba->saved_err,
					hba->saved_uic_err);
			ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
					 "host_regs: ");
			ufshcd_print_pwr_info(hba);
		}
		ufshcd_schedule_eh_work(hba);
		retval |= IRQ_HANDLED;
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
	hba->errors = 0;
	hba->uic_error = 0;
	spin_unlock(hba->host->host_lock);
	return retval;
}
/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
	unsigned long flags, pending, issued;
	irqreturn_t ret = IRQ_NONE;
	int tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	issued = hba->outstanding_tasks & ~pending;
	for_each_set_bit(tag, &issued, hba->nutmrs) {
		struct request *req = hba->tmf_rqs[tag];
		struct completion *c = req->end_io_data;

		complete(c);
		ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
/**
 * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events
 * @hba: per adapter instance
 *
 * Return: IRQ_HANDLED if interrupt is handled.
 */
static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba)
{
	struct ufs_hw_queue *hwq;
	unsigned long outstanding_cqs;
	unsigned int nr_queues;
	int i, ret;
	u32 events;

	ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs);
	if (ret)
		outstanding_cqs = (1U << hba->nr_hw_queues) - 1;

	/* Exclude the poll queues */
	nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
	for_each_set_bit(i, &outstanding_cqs, nr_queues) {
		hwq = &hba->uhq[i];

		events = ufshcd_mcq_read_cqis(hba, i);
		if (events)
			ufshcd_mcq_write_cqis(hba, events, i);

		if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
			ufshcd_mcq_poll_cqe_lock(hba, hwq);
	}

	return IRQ_HANDLED;
}
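/*
 * Example (illustrative only, not part of the driver logic): when the vop
 * cannot report which completion queues are outstanding, the fallback mask
 * (1U << hba->nr_hw_queues) - 1 simply selects every queue. With
 * nr_hw_queues == 8 that mask is 0xFF, and the handler above then walks
 * only the first nr_queues bits so that trailing poll queues are skipped.
 */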
/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	irqreturn_t retval = IRQ_NONE;

	if (intr_status & UFSHCD_UIC_MASK)
		retval |= ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UFSHCD_ERROR_MASK || hba->errors)
		retval |= ufshcd_check_errors(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		retval |= ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		retval |= ufshcd_transfer_req_compl(hba);

	if (intr_status & MCQ_CQ_EVENT_STATUS)
		retval |= ufshcd_handle_mcq_cq_events(hba);

	return retval;
}
/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Return:
 *  IRQ_HANDLED - If interrupt is valid
 *  IRQ_NONE    - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status, enabled_intr_status = 0;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;
	int retries = hba->nutrs;

	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	hba->ufs_stats.last_intr_status = intr_status;
	hba->ufs_stats.last_intr_ts = local_clock();

	/*
	 * There could be max of hba->nutrs reqs in flight and in worst case
	 * if the reqs get finished 1 by 1 after the interrupt status is
	 * read, make sure we handle them by checking the interrupt status
	 * again in a loop until we process all of the reqs before returning.
	 */
	while (intr_status && retries--) {
		enabled_intr_status =
			intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		if (enabled_intr_status)
			retval |= ufshcd_sl_intr(hba, enabled_intr_status);

		intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	}

	if (enabled_intr_status && retval == IRQ_NONE &&
	    (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL) ||
	     hba->outstanding_reqs) && !ufshcd_eh_in_progress(hba)) {
		dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n",
					__func__,
					intr_status,
					hba->ufs_stats.last_intr_status,
					enabled_intr_status);
		ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
	}

	return retval;
}
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err;
	u32 mask = 1 << tag;

	if (!test_bit(tag, &hba->outstanding_tasks))
		return 0;

	ufshcd_utmrl_clear(hba, tag);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);

	dev_err(hba->dev, "Clearing task management function with tag %d %s\n",
		tag, err < 0 ? "failed" : "succeeded");

	return err;
}
static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
		struct utp_task_req_desc *treq, u8 tm_function)
{
	struct request_queue *q = hba->tmf_queue;
	struct Scsi_Host *host = hba->host;
	DECLARE_COMPLETION_ONSTACK(wait);
	struct request *req;
	unsigned long flags;
	int task_tag, err;

	/*
	 * blk_mq_alloc_request() is used here only to get a free tag.
	 */
	req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->end_io_data = &wait;
	ufshcd_hold(hba);

	spin_lock_irqsave(host->host_lock, flags);

	task_tag = req->tag;
	hba->tmf_rqs[req->tag] = req;
	treq->upiu_req.req_header.task_tag = task_tag;

	memcpy(hba->utmrdl_base_addr + task_tag, treq, sizeof(*treq));
	ufshcd_vops_setup_task_mgmt(hba, task_tag, tm_function);

	__set_bit(task_tag, &hba->outstanding_tasks);

	spin_unlock_irqrestore(host->host_lock, flags);

	/* send command to the controller */
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TASK_REQ_DOOR_BELL);

	ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);

	/* wait until the task management command is completed */
	err = wait_for_completion_io_timeout(&wait,
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, task_tag))
			dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
					__func__, task_tag);
		err = -ETIMEDOUT;
	} else {
		err = 0;
		memcpy(treq, hba->utmrdl_base_addr + task_tag, sizeof(*treq));

		ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->tmf_rqs[req->tag] = NULL;
	__clear_bit(task_tag, &hba->outstanding_tasks);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ufshcd_release(hba);
	blk_mq_free_request(req);

	return err;
}
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Return: non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc treq = { };
	enum utp_ocs ocs_value;
	int err;

	/* Configure task request descriptor */
	treq.header.interrupt = 1;
	treq.header.ocs = OCS_INVALID_COMMAND_STATUS;

	/* Configure task request UPIU */
	treq.upiu_req.req_header.transaction_code = UPIU_TRANSACTION_TASK_REQ;
	treq.upiu_req.req_header.lun = lun_id;
	treq.upiu_req.req_header.tm_function = tm_function;

	/*
	 * The host shall provide the same value for LUN field in the basic
	 * header and for Input Parameter.
	 */
	treq.upiu_req.input_param1 = cpu_to_be32(lun_id);
	treq.upiu_req.input_param2 = cpu_to_be32(task_id);

	err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
	if (err == -ETIMEDOUT)
		return err;

	ocs_value = treq.header.ocs & MASK_OCS;
	if (ocs_value != OCS_SUCCESS)
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	else if (tm_response)
		*tm_response = be32_to_cpu(treq.upiu_rsp.output_param1) &
				MASK_TM_SERVICE_RESP;
	return err;
}
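/*
 * Example (illustrative sketch only, not part of the driver): a caller such
 * as the LU reset handler further below consumes ufshcd_issue_tm_cmd()
 * roughly as follows. Checking "resp" against
 * UPIU_TASK_MANAGEMENT_FUNC_COMPL mirrors how the service response is
 * treated as an error when the function did not complete.
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *
 *	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		err = err ? : resp;	/+ treat service response as error +/
 */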
/**
 * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @cmd_type: specifies the type (NOP, Query...)
 * @desc_op: descriptor operation
 *
 * Those type of requests uses UTP Transfer Request Descriptor - utrd.
 * Therefore, it "rides" the device management infrastructure: uses its tag and
 * tasks work queues.
 *
 * Since there is only one available tag for device management commands,
 * the caller is expected to hold the hba->dev_cmd.lock mutex.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
					struct utp_upiu_req *req_upiu,
					struct utp_upiu_req *rsp_upiu,
					u8 *desc_buff, int *buff_len,
					enum dev_cmd_type cmd_type,
					enum query_opcode desc_op)
{
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int err = 0;
	u8 upiu_flags;

	/* Protects use of hba->reserved_slot. */
	lockdep_assert_held(&hba->dev_cmd.lock);

	ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);

	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);

	/* update the task tag in the request upiu */
	req_upiu->header.task_tag = tag;

	/* just copy the upiu request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
		/* The Data Segment Area is optional depending upon the query
		 * function value. for WRITE DESCRIPTOR, the data segment
		 * follows right after the tsf.
		 */
		memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
		*buff_len = 0;
	}

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	/*
	 * ignore the returning value here - ufshcd_check_query_response is
	 * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
	 * read the response directly ignoring all errors.
	 */
	ufshcd_issue_dev_cmd(hba, lrbp, tag, QUERY_REQ_TIMEOUT);

	/* just copy the upiu response as it is */
	memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
	if (desc_buff && desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr + sizeof(*rsp_upiu);
		u16 resp_len = be16_to_cpu(lrbp->ucd_rsp_ptr->header
					   .data_segment_length);

		if (*buff_len >= resp_len) {
			memcpy(desc_buff, descp, resp_len);
			*buff_len = resp_len;
		} else {
			dev_warn(hba->dev,
				 "%s: rsp size %d is bigger than buffer size %d",
				 __func__, resp_len, *buff_len);
			*buff_len = 0;
			err = -EINVAL;
		}
	}
	ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
				    (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);

	return err;
}
/**
 * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
 * @hba: per-adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
 * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
 * @desc_buff: pointer to descriptor buffer, NULL if NA
 * @buff_len: descriptor size, 0 if NA
 * @desc_op: descriptor operation
 *
 * Supports UTP Transfer requests (nop and query), and UTP Task
 * Management requests.
 * It is up to the caller to fill the upiu conent properly, as it will
 * be copied without any further input validations.
 *
 * Return: 0 upon success; < 0 upon failure.
 */
int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     enum upiu_request_transaction msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op)
{
	int err;
	enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY;
	struct utp_task_req_desc treq = { };
	enum utp_ocs ocs_value;
	u8 tm_f = req_upiu->header.tm_function;

	switch (msgcode) {
	case UPIU_TRANSACTION_NOP_OUT:
		cmd_type = DEV_CMD_TYPE_NOP;
		fallthrough;
	case UPIU_TRANSACTION_QUERY_REQ:
		ufshcd_dev_man_lock(hba);
		err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
						   desc_buff, buff_len,
						   cmd_type, desc_op);
		ufshcd_dev_man_unlock(hba);

		break;
	case UPIU_TRANSACTION_TASK_REQ:
		treq.header.interrupt = 1;
		treq.header.ocs = OCS_INVALID_COMMAND_STATUS;

		memcpy(&treq.upiu_req, req_upiu, sizeof(*req_upiu));

		err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
		if (err == -ETIMEDOUT)
			break;

		ocs_value = treq.header.ocs & MASK_OCS;
		if (ocs_value != OCS_SUCCESS) {
			dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
				ocs_value);
			break;
		}

		memcpy(rsp_upiu, &treq.upiu_rsp, sizeof(*rsp_upiu));

		break;
	default:
		err = -EINVAL;

		break;
	}

	return err;
}
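/*
 * Example (illustrative sketch only, not taken from this file): the UFS BSG
 * layer is the typical caller of this API. A NOP OUT issued through it would
 * look roughly like the snippet below; the zero-filled request/response
 * structures are placeholders and no descriptor buffer is involved.
 *
 *	struct utp_upiu_req req = { }, rsp = { };
 *	int err;
 *
 *	req.header.transaction_code = UPIU_TRANSACTION_NOP_OUT;
 *	err = ufshcd_exec_raw_upiu_cmd(hba, &req, &rsp,
 *				       UPIU_TRANSACTION_NOP_OUT,
 *				       NULL, NULL, UPIU_QUERY_OPCODE_NOP);
 */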
/**
 * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request
 * @hba: per adapter instance
 * @req_upiu: upiu request
 * @rsp_upiu: upiu reply
 * @req_ehs: EHS field which contains Advanced RPMB Request Message
 * @rsp_ehs: EHS field which returns Advanced RPMB Response Message
 * @sg_cnt: The number of sg lists actually used
 * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation
 * @dir: DMA direction
 *
 * Return: zero on success, non-zero on failure.
 */
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
			 struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs,
			 struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list,
			 enum dma_data_direction dir)
{
	const u32 tag = hba->reserved_slot;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int err = 0;
	int result;
	u8 upiu_flags;
	u8 *ehs_data;
	u16 ehs_len;
	int ehs = (hba->capabilities & MASK_EHSLUTRD_SUPPORTED) ? 2 : 0;

	/* Protects use of hba->reserved_slot. */
	ufshcd_dev_man_lock(hba);

	ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);

	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);

	/* update the task tag */
	req_upiu->header.task_tag = tag;

	/* copy the UPIU(contains CDB) request as it is */
	memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
	/* Copy EHS, starting with byte32, immediately after the CDB package */
	memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs));

	if (dir != DMA_NONE && sg_list)
		ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list);

	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	err = ufshcd_issue_dev_cmd(hba, lrbp, tag, ADVANCED_RPMB_REQ_TIMEOUT);

	if (!err) {
		/* Just copy the upiu response as it is */
		memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
		/* Get the response UPIU result */
		result = (lrbp->ucd_rsp_ptr->header.response << 8) |
			lrbp->ucd_rsp_ptr->header.status;

		ehs_len = lrbp->ucd_rsp_ptr->header.ehs_length;
		/*
		 * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data
		 * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB
		 * Message is 02h.
		 */
		if (ehs_len == 2 && rsp_ehs) {
			/*
			 * ucd_rsp_ptr points to a buffer with a length of 512 bytes
			 * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32
			 */
			ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE;
			memcpy(rsp_ehs, ehs_data, ehs_len * 32);
		}
	}

	ufshcd_dev_man_unlock(hba);

	return err ? : result;
}
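/*
 * Example (illustrative only): the EHS length in the response header is
 * expressed in 32-byte units, so the ehs_len == 2 case handled above copies
 * 2 * 32 = 64 bytes of Advanced RPMB response message, taken from offset
 * EHS_OFFSET_IN_RESPONSE within the 512-byte UPIU response buffer.
 */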
/**
 * ufshcd_eh_device_reset_handler() - Reset a single logical unit.
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	unsigned long flags, pending_reqs = 0, not_cleared = 0;
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	struct ufs_hw_queue *hwq;
	struct ufshcd_lrb *lrbp;
	u32 pos, not_cleared_mask = 0;
	int err;
	u8 resp = 0xF, lun;

	host = cmd->device->host;
	hba = shost_priv(host);

	lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
	err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	if (hba->mcq_enabled) {
		for (pos = 0; pos < hba->nutrs; pos++) {
			lrbp = &hba->lrb[pos];
			if (ufshcd_cmd_inflight(lrbp->cmd) &&
			    lrbp->lun == lun) {
				ufshcd_clear_cmd(hba, pos);
				hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(lrbp->cmd));
				ufshcd_mcq_poll_cqe_lock(hba, hwq);
			}
		}
		err = 0;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs)
		if (hba->lrb[pos].lun == lun)
			__set_bit(pos, &pending_reqs);
	hba->outstanding_reqs &= ~pending_reqs;
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	for_each_set_bit(pos, &pending_reqs, hba->nutrs) {
		if (ufshcd_clear_cmd(hba, pos) < 0) {
			spin_lock_irqsave(&hba->outstanding_lock, flags);
			not_cleared = 1U << pos &
				ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			hba->outstanding_reqs |= not_cleared;
			not_cleared_mask |= not_cleared;
			spin_unlock_irqrestore(&hba->outstanding_lock, flags);

			dev_err(hba->dev, "%s: failed to clear request %d\n",
				__func__, pos);
		}
	}
	__ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared_mask);

out:
	hba->req_abort_count = 0;
	ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, (u32)err);
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}
static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
{
	struct ufshcd_lrb *lrbp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];
		lrbp->req_abort_skip = true;
	}
}
/**
 * ufshcd_try_to_abort_task - abort a specific task
 * @hba: Pointer to adapter instance
 * @tag: Task tag/index to be aborted
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be race between controller sending the command to the device while abort is
 * issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
 * really issued and then try to abort it.
 *
 * Return: zero on success, non-zero on failure.
 */
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
{
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	int err;
	int poll_cnt;
	u8 resp = 0xF;

	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
				__func__, tag);
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			dev_info(hba->dev,
				 "%s: cmd with tag %d not pending in the device.\n",
				 __func__, tag);
			if (!ufshcd_cmd_inflight(lrbp->cmd)) {
				dev_info(hba->dev,
					 "%s: cmd with tag=%d completed.\n",
					 __func__, tag);
				return 0;
			}
			usleep_range(100, 200);
		} else {
			dev_err(hba->dev,
				"%s: no response from device. tag = %d, err %d\n",
				__func__, tag, err);
			return err ? : resp;
		}
	}

	if (!poll_cnt)
		return -EBUSY;

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err) {
			err = resp; /* service response error */
			dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
				__func__, tag, err);
		}
		return err;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
			__func__, tag, err);

	return err;
}
/**
 * ufshcd_abort - scsi host template eh_abort_handler callback
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ufs_hba *hba = shost_priv(host);
	int tag = scsi_cmd_to_rq(cmd)->tag;
	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
	unsigned long flags;
	int err = FAILED;
	bool outstanding;
	u32 reg;

	ufshcd_hold(hba);

	if (!hba->mcq_enabled) {
		reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!test_bit(tag, &hba->outstanding_reqs)) {
			/* If command is already aborted/completed, return FAILED. */
			dev_err(hba->dev,
				"%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
				__func__, tag, hba->outstanding_reqs, reg);
			goto release;
		}
	}

	/* Print Transfer Request of aborted task */
	dev_info(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);

	/*
	 * Print detailed info about aborted request.
	 * As more than one request might get aborted at the same time,
	 * print full information only for the first aborted request in order
	 * to reduce repeated printouts. For other aborted requests only print
	 * basic details.
	 */
	scsi_print_command(cmd);
	if (!hba->req_abort_count) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, tag);
		ufshcd_print_evt_hist(hba);
		ufshcd_print_host_state(hba);
		ufshcd_print_pwr_info(hba);
		ufshcd_print_tr(hba, tag, true);
	} else {
		ufshcd_print_tr(hba, tag, false);
	}
	hba->req_abort_count++;

	if (!hba->mcq_enabled && !(reg & (1 << tag))) {
		/* only execute this code in single doorbell mode */
		dev_err(hba->dev,
			"%s: cmd was completed, but without a notifying intr, tag = %d",
			__func__, tag);
		__ufshcd_transfer_req_compl(hba, 1UL << tag);
		goto release;
	}

	/*
	 * Task abort to the device W-LUN is illegal. When this command
	 * will fail, due to spec violation, scsi err handling next step
	 * will be to send LU reset which, again, is a spec violation.
	 * To avoid these unnecessary/illegal steps, first we clean up
	 * the lrb taken by this cmd and re-set it in outstanding_reqs,
	 * then queue the eh_work and bail.
	 */
	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
		ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);

		spin_lock_irqsave(host->host_lock, flags);
		hba->force_reset = true;
		ufshcd_schedule_eh_work(hba);
		spin_unlock_irqrestore(host->host_lock, flags);
		goto release;
	}

	if (hba->mcq_enabled) {
		/* MCQ mode. Branch off to handle abort for mcq mode */
		err = ufshcd_mcq_abort(cmd);
		goto release;
	}

	/* Skip task abort in case previous aborts failed and report failure */
	if (lrbp->req_abort_skip) {
		dev_err(hba->dev, "%s: skipping abort\n", __func__);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		goto release;
	}

	err = ufshcd_try_to_abort_task(hba, tag);
	if (err) {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
		err = FAILED;
		goto release;
	}

	/*
	 * Clear the corresponding bit from outstanding_reqs since the command
	 * has been aborted successfully.
	 */
	spin_lock_irqsave(&hba->outstanding_lock, flags);
	outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
	spin_unlock_irqrestore(&hba->outstanding_lock, flags);

	if (outstanding)
		ufshcd_release_scsi_cmd(hba, lrbp);

	err = SUCCESS;

release:
	/* Matches the ufshcd_hold() call at the start of this function. */
	ufshcd_release(hba);
	return err;
}
/**
 * ufshcd_process_probe_result - Process the ufshcd_probe_hba() result.
 * @hba: UFS host controller instance.
 * @probe_start: time when the ufshcd_probe_hba() call started.
 * @ret: ufshcd_probe_hba() return value.
 */
static void ufshcd_process_probe_result(struct ufs_hba *hba,
					ktime_t probe_start, int ret)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret)
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	trace_ufshcd_init(dev_name(hba->dev), ret,
			  ktime_to_us(ktime_sub(ktime_get(), probe_start)),
			  hba->curr_dev_pwr_mode, hba->uic_link_state);
}
/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;

	/*
	 * Stop the host controller and complete the requests
	 * cleared by h/w
	 */
	ufshcd_hba_stop(hba);
	hba->silence_err_logs = true;
	ufshcd_complete_requests(hba, true);
	hba->silence_err_logs = false;

	/* scale up clocks to max frequency before full reinitialization */
	ufshcd_scale_clks(hba, ULONG_MAX, true);

	err = ufshcd_hba_enable(hba);

	/* Establish the link again and restore the device */
	if (!err) {
		ktime_t probe_start = ktime_get();

		err = ufshcd_device_init(hba, /*init_dev_params=*/false);
		if (!err)
			err = ufshcd_probe_hba(hba, false);
		ufshcd_process_probe_result(hba, probe_start, err);
	}

	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
	ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err);
	return err;
}
/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Return: zero on success, non-zero on failure.
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	u32 saved_err = 0;
	u32 saved_uic_err = 0;
	int err = 0;
	unsigned long flags;
	int retries = MAX_HOST_RESET_RETRIES;

	spin_lock_irqsave(hba->host->host_lock, flags);
	do {
		/*
		 * This is a fresh start, cache and clear saved error first,
		 * in case new error generated during reset and restore.
		 */
		saved_err |= hba->saved_err;
		saved_uic_err |= hba->saved_uic_err;
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
		hba->force_reset = false;
		hba->ufshcd_state = UFSHCD_STATE_RESET;
		spin_unlock_irqrestore(hba->host->host_lock, flags);

		/* Reset the attached device */
		ufshcd_device_reset(hba);

		err = ufshcd_host_reset_and_restore(hba);

		spin_lock_irqsave(hba->host->host_lock, flags);
		if (err)
			continue;
		/* Do not exit unless operational or dead */
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL &&
		    hba->ufshcd_state != UFSHCD_STATE_ERROR &&
		    hba->ufshcd_state != UFSHCD_STATE_EH_SCHEDULED_NON_FATAL)
			err = -EAGAIN;
	} while (err && --retries);

	/*
	 * Inform scsi mid-layer that we did reset and allow to handle
	 * Unit Attention properly.
	 */
	scsi_report_bus_reset(hba->host, 0);
	if (err) {
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
		hba->saved_err |= saved_err;
		hba->saved_uic_err |= saved_uic_err;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Return: SUCCESS or FAILED.
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err = SUCCESS;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	/*
	 * If runtime PM sent SSU and got a timeout, scsi_error_handler is
	 * stuck in this function waiting for flush_work(&hba->eh_work). And
	 * ufshcd_err_handler(eh_work) is stuck waiting for runtime PM. Do
	 * ufshcd_link_recovery instead of eh_work to prevent deadlock.
	 */
	if (hba->pm_op_in_progress) {
		if (ufshcd_link_recovery(hba))
			err = FAILED;

		return err;
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	flush_work(&hba->eh_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
		err = FAILED;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}
/**
 * ufshcd_get_max_icc_level - calculate the ICC level
 * @sup_curr_uA: max. current supported by the regulator
 * @start_scan: row at the desc table to start scan from
 * @buff: power descriptor buffer
 *
 * Return: calculated max ICC level for specific regulator.
 */
static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan,
				    const char *buff)
{
	int i;
	int curr_uA;
	u16 data;
	u16 unit;

	for (i = start_scan; i >= 0; i--) {
		data = get_unaligned_be16(&buff[2 * i]);
		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
						ATTR_ICC_LVL_UNIT_OFFSET;
		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
		switch (unit) {
		case UFSHCD_NANO_AMP:
			curr_uA = curr_uA / 1000;
			break;
		case UFSHCD_MILI_AMP:
			curr_uA = curr_uA * 1000;
			break;
		case UFSHCD_AMP:
			curr_uA = curr_uA * 1000 * 1000;
			break;
		case UFSHCD_MICRO_AMP:
		default:
			break;
		}
		if (sup_curr_uA >= curr_uA)
			break;
	}
	if (i < 0) {
		i = 0;
		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
	}

	return (u32)i;
}
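/*
 * Example (illustrative only): with sup_curr_uA = 800000 (an 800 mA rail)
 * and a descriptor row whose unit field is UFSHCD_MILI_AMP and whose value
 * field is 600, the row normalizes to 600 * 1000 = 600000 uA, which is
 * <= 800000, so that row index becomes the returned ICC level. The scan
 * walks from the highest row downwards and stops at the first level the
 * regulator can actually supply.
 */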
/**
 * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
 * In case regulators are not initialized we'll return 0
 * @hba: per-adapter instance
 * @desc_buf: power descriptor buffer to extract ICC levels from.
 *
 * Return: calculated ICC level.
 */
static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
						const u8 *desc_buf)
{
	u32 icc_level = 0;

	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
	    !hba->vreg_info.vccq2) {
		/*
		 * Using dev_dbg to avoid messages during runtime PM to avoid
		 * never-ending cycles of messages written back to storage by
		 * user space causing runtime resume, causing more messages and
		 * so on.
		 */
		dev_dbg(hba->dev,
			"%s: Regulator capability was not set, actvIccLevel=%d",
			__func__, icc_level);
		goto out;
	}

	if (hba->vreg_info.vcc->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vcc->max_uA,
				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);

	if (hba->vreg_info.vccq->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);

	if (hba->vreg_info.vccq2->max_uA)
		icc_level = ufshcd_get_max_icc_level(
				hba->vreg_info.vccq2->max_uA,
				icc_level,
				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
out:
	return icc_level;
}
static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba)
{
	int ret;
	u8 *desc_buf;
	u32 icc_level;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf)
		return;

	ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (ret) {
		dev_err(hba->dev,
			"%s: Failed reading power descriptor ret = %d",
			__func__, ret);
		goto out;
	}

	icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf);
	dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level);

	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
		QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0, &icc_level);

	if (ret)
		dev_err(hba->dev,
			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
			__func__, icc_level, ret);

out:
	kfree(desc_buf);
}
static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;

	scsi_autopm_get_device(sdev);
	blk_pm_runtime_init(sdev->request_queue, &sdev->sdev_gendev);
	if (sdev->rpm_autosuspend)
		pm_runtime_set_autosuspend_delay(&sdev->sdev_gendev,
						 shost->rpm_autosuspend_delay);
	scsi_autopm_put_device(sdev);
}
/**
 * ufshcd_scsi_add_wlus - Adds required W-LUs
 * @hba: per-adapter instance
 *
 * UFS device specification requires the UFS devices to support 4 well known
 * logical units:
 *	"REPORT_LUNS" (address: 01h)
 *	"UFS Device" (address: 50h)
 *	"RPMB" (address: 44h)
 *	"BOOT" (address: 30h)
 * UFS device's power management needs to be controlled by "POWER CONDITION"
 * field of SSU (START STOP UNIT) command. But this "power condition" field
 * will take effect only when its sent to "UFS device" well known logical unit
 * hence we require the scsi_device instance to represent this logical unit in
 * order for the UFS host driver to send the SSU command for power management.
 *
 * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
 * Block) LU so user space process can control this LU. User space may also
 * want to have access to BOOT LU.
 *
 * This function adds scsi device instances for each of all well known LUs
 * (except "REPORT LUNS" LU).
 *
 * Return: zero on success (all required W-LUs are added successfully),
 * non-zero error value on failure (if failed to add any of the required W-LU).
 */
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
	int ret = 0;
	struct scsi_device *sdev_boot, *sdev_rpmb;

	hba->ufs_device_wlun = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
	if (IS_ERR(hba->ufs_device_wlun)) {
		ret = PTR_ERR(hba->ufs_device_wlun);
		hba->ufs_device_wlun = NULL;
		goto out;
	}
	scsi_device_put(hba->ufs_device_wlun);

	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
	if (IS_ERR(sdev_rpmb)) {
		ret = PTR_ERR(sdev_rpmb);
		goto remove_ufs_device_wlun;
	}
	ufshcd_blk_pm_runtime_init(sdev_rpmb);
	scsi_device_put(sdev_rpmb);

	sdev_boot = __scsi_add_device(hba->host, 0, 0,
		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
	if (IS_ERR(sdev_boot)) {
		dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
	} else {
		ufshcd_blk_pm_runtime_init(sdev_boot);
		scsi_device_put(sdev_boot);
	}
	goto out;

remove_ufs_device_wlun:
	scsi_remove_device(hba->ufs_device_wlun);
out:
	return ret;
}
static void ufshcd_wb_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u8 lun;
	u32 d_lu_wb_buf_alloc;
	u32 ext_ufs_feature;

	if (!ufshcd_is_wb_allowed(hba))
		return;

	/*
	 * Probe WB only for UFS-2.2 and UFS-3.1 (and later) devices or
	 * UFS devices with quirk UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES
	 * enabled
	 */
	if (!(dev_info->wspecversion >= 0x310 ||
	      dev_info->wspecversion == 0x220 ||
	      (hba->dev_quirks & UFS_DEVICE_QUIRK_SUPPORT_EXTENDED_FEATURES)))
		goto wb_disabled;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
					DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
		goto wb_disabled;

	/*
	 * WB may be supported but not configured while provisioning. The spec
	 * says, in dedicated wb buffer mode, a max of 1 lun would have wb
	 * buffer configured.
	 */
	dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];

	dev_info->b_presrv_uspc_en =
		desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];

	if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
		if (!get_unaligned_be32(desc_buf +
				   DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
			goto wb_disabled;
	} else {
		for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
			d_lu_wb_buf_alloc = 0;
			ufshcd_read_unit_desc_param(hba,
					lun,
					UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS,
					(u8 *)&d_lu_wb_buf_alloc,
					sizeof(d_lu_wb_buf_alloc));
			if (d_lu_wb_buf_alloc) {
				dev_info->wb_dedicated_lu = lun;
				break;
			}
		}

		if (!d_lu_wb_buf_alloc)
			goto wb_disabled;
	}

	if (!ufshcd_is_wb_buf_lifetime_available(hba))
		goto wb_disabled;

	return;

wb_disabled:
	hba->caps &= ~UFSHCD_CAP_WB_EN;
}
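/*
 * Example (illustrative only): wspecversion is the spec-version word read
 * from the device descriptor in ufs_get_device_desc(), so 0x310 corresponds
 * to UFS 3.1 and 0x220 to UFS 2.2. A UFS 2.1 part (0x210) without the
 * extended-features quirk therefore leaves WriteBooster disabled by the
 * version check above.
 */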
static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u8 mask = 0;

	if (!(hba->caps & UFSHCD_CAP_TEMP_NOTIF) || dev_info->wspecversion < 0x300)
		return;

	ext_ufs_feature = get_unaligned_be32(desc_buf + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);

	if (ext_ufs_feature & UFS_DEV_LOW_TEMP_NOTIF)
		mask |= MASK_EE_TOO_LOW_TEMP;

	if (ext_ufs_feature & UFS_DEV_HIGH_TEMP_NOTIF)
		mask |= MASK_EE_TOO_HIGH_TEMP;

	if (mask) {
		ufshcd_enable_ee(hba, mask);
		ufs_hwmon_probe(hba, mask);
	}
}
static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 ext_ufs_feature;
	u32 ext_iid_en = 0;
	int err;

	/* Only UFS-4.0 and above may support EXT_IID */
	if (dev_info->wspecversion < 0x400)
		goto out;

	ext_ufs_feature = get_unaligned_be32(desc_buf +
				     DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
	if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP))
		goto out;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				      QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en);
	if (err)
		dev_err(hba->dev, "failed reading bEXTIIDEn. err = %d\n", err);

out:
	dev_info->b_ext_iid_en = ext_iid_en;
}
static void ufshcd_set_rtt(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;
	u32 rtt = 0;
	u32 dev_rtt = 0;
	int host_rtt_cap = hba->vops && hba->vops->max_num_rtt ?
			    hba->vops->max_num_rtt : hba->nortt;

	/* RTT override makes sense only for UFS-4.0 and above */
	if (dev_info->wspecversion < 0x400)
		return;

	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				    QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &dev_rtt)) {
		dev_err(hba->dev, "failed reading bMaxNumOfRTT\n");
		return;
	}

	/* do not override if it was already written */
	if (dev_rtt != DEFAULT_MAX_NUM_RTT)
		return;

	rtt = min_t(int, dev_info->rtt_cap, host_rtt_cap);

	if (rtt == dev_rtt)
		return;

	if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
				    QUERY_ATTR_IDN_MAX_NUM_OF_RTT, 0, 0, &rtt))
		dev_err(hba->dev, "failed writing bMaxNumOfRTT\n");
}
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
			     const struct ufs_dev_quirk *fixups)
{
	const struct ufs_dev_quirk *f;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (!fixups)
		return;

	for (f = fixups; f->quirk; f++) {
		if ((f->wmanufacturerid == dev_info->wmanufacturerid ||
		     f->wmanufacturerid == UFS_ANY_VENDOR) &&
		     ((dev_info->model &&
		       STR_PRFX_EQUAL(f->model, dev_info->model)) ||
		      !strcmp(f->model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
}
EXPORT_SYMBOL_GPL(ufshcd_fixup_dev_quirks);
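/*
 * Example (illustrative sketch, not part of this file): a vendor host driver
 * can declare its own fixup table and feed it to ufshcd_fixup_dev_quirks()
 * from its ->fixup_dev_quirks() vop. The table and function names below are
 * hypothetical; only the terminating empty entry is required by the loop
 * above.
 *
 *	static const struct ufs_dev_quirk example_ufs_fixups[] = {
 *		{ .wmanufacturerid = UFS_ANY_VENDOR,
 *		  .model = UFS_ANY_MODEL,
 *		  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM },
 *		{}
 *	};
 *
 *	static void example_fixup_dev_quirks(struct ufs_hba *hba)
 *	{
 *		ufshcd_fixup_dev_quirks(hba, example_ufs_fixups);
 *	}
 */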
static void ufs_fixup_device_setup(struct ufs_hba *hba)
{
	/* fix by general quirk table */
	ufshcd_fixup_dev_quirks(hba, ufs_fixups);

	/* allow vendors to fix quirks */
	ufshcd_vops_fixup_dev_quirks(hba);
}
static void ufshcd_update_rtc(struct ufs_hba *hba)
{
	struct timespec64 ts64;
	int err;
	u32 val;

	ktime_get_real_ts64(&ts64);

	if (ts64.tv_sec < hba->dev_info.rtc_time_baseline) {
		dev_warn_once(hba->dev, "%s: Current time precedes previous setting!\n", __func__);
		return;
	}

	/*
	 * The Absolute RTC mode has a 136-year limit, spanning from 2010 to 2146. If a time beyond
	 * 2146 is required, it is recommended to choose the relative RTC mode.
	 */
	val = ts64.tv_sec - hba->dev_info.rtc_time_baseline;

	/* Skip update RTC if RPM state is not RPM_ACTIVE */
	if (ufshcd_rpm_get_if_active(hba) <= 0)
		return;

	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, QUERY_ATTR_IDN_SECONDS_PASSED,
				0, 0, &val);
	ufshcd_rpm_put(hba);

	if (err)
		dev_err(hba->dev, "%s: Failed to update rtc %d\n", __func__, err);
	else if (hba->dev_info.rtc_type == UFS_RTC_RELATIVE)
		hba->dev_info.rtc_time_baseline = ts64.tv_sec;
}
static void ufshcd_rtc_work(struct work_struct *work)
{
	struct ufs_hba *hba;

	hba = container_of(to_delayed_work(work), struct ufs_hba, ufs_rtc_update_work);

	/* Update RTC only when there are no requests in progress and UFSHCI is operational */
	if (!ufshcd_is_ufs_dev_busy(hba) && hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL)
		ufshcd_update_rtc(hba);

	if (ufshcd_is_ufs_dev_active(hba) && hba->dev_info.rtc_update_period)
		schedule_delayed_work(&hba->ufs_rtc_update_work,
				      msecs_to_jiffies(hba->dev_info.rtc_update_period));
}
static void ufs_init_rtc(struct ufs_hba *hba, u8 *desc_buf)
{
	u16 periodic_rtc_update = get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_FRQ_RTC]);
	struct ufs_dev_info *dev_info = &hba->dev_info;

	if (periodic_rtc_update & UFS_RTC_TIME_BASELINE) {
		dev_info->rtc_type = UFS_RTC_ABSOLUTE;

		/*
		 * The concept of measuring time in Linux as the number of seconds elapsed since
		 * 00:00:00 UTC on January 1, 1970, and UFS ABS RTC is elapsed from January 1st
		 * 2010 00:00, here we need to adjust ABS baseline.
		 */
		dev_info->rtc_time_baseline = mktime64(2010, 1, 1, 0, 0, 0) -
							mktime64(1970, 1, 1, 0, 0, 0);
	} else {
		dev_info->rtc_type = UFS_RTC_RELATIVE;
		dev_info->rtc_time_baseline = 0;
	}

	/*
	 * We ignore TIME_PERIOD defined in wPeriodicRTCUpdate because Spec does not clearly state
	 * how to calculate the specific update period for each time unit. And we disable periodic
	 * RTC update work, let user configure by sysfs node according to specific circumstance.
	 */
	dev_info->rtc_update_period = 0;
}
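/*
 * Example (illustrative only): in absolute mode the attribute written by
 * ufshcd_update_rtc() is the Unix time minus the 2010-01-01 baseline
 * computed above (1262304000 seconds). For 2024-01-01 00:00:00 UTC
 * (tv_sec = 1704067200) the value sent to the device would therefore be
 * 1704067200 - 1262304000 = 441763200 seconds since the UFS epoch.
 */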
static int ufs_get_device_desc(struct ufs_hba *hba)
{
	int err;
	u8 model_index;
	u8 *desc_buf;
	struct ufs_dev_info *dev_info = &hba->dev_info;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf,
				     QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	/* getting Specification Version in big endian format */
	dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 |
				      desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1];
	dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH];

	dev_info->rtt_cap = desc_buf[DEVICE_DESC_PARAM_RTT_CAP];

	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	err = ufshcd_read_string_desc(hba, model_index,
				      &dev_info->model, SD_ASCII_STD);
	if (err < 0) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	hba->luns_avail = desc_buf[DEVICE_DESC_PARAM_NUM_LU] +
		desc_buf[DEVICE_DESC_PARAM_NUM_WLU];

	ufs_fixup_device_setup(hba);

	ufshcd_wb_probe(hba, desc_buf);

	ufshcd_temp_notif_probe(hba, desc_buf);

	ufs_init_rtc(hba, desc_buf);

	if (hba->ext_iid_sup)
		ufshcd_ext_iid_probe(hba, desc_buf);

	/*
	 * ufshcd_read_string_desc returns size of the string
	 * reset the error value
	 */
	err = 0;

out:
	kfree(desc_buf);
	return err;
}
static void ufs_put_device_desc(struct ufs_hba *hba)
{
	struct ufs_dev_info *dev_info = &hba->dev_info;

	kfree(dev_info->model);
	dev_info->model = NULL;
}
/**
 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
 * less than device PA_TACTIVATE time.
 * @hba: per-adapter instance
 *
 * Some UFS devices require host PA_TACTIVATE to be lower than device
 * PA_TACTIVATE, so the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
 * enabled for such devices.
 *
 * Return: zero on success, non-zero error value on failure.
 */
static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
{
	int ret = 0;
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	u32 pa_tactivate_us, peer_pa_tactivate_us;
	static const u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
				  &peer_granularity);
	if (ret)
		goto out;

	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
	    (granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
			__func__, granularity);
		return -EINVAL;
	}

	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
			__func__, peer_granularity);
		return -EINVAL;
	}

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
				  &peer_pa_tactivate);
	if (ret)
		goto out;

	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
	peer_pa_tactivate_us = peer_pa_tactivate *
			     gran_to_us_table[peer_granularity - 1];

	if (pa_tactivate_us >= peer_pa_tactivate_us) {
		u32 new_peer_pa_tactivate;

		new_peer_pa_tactivate = pa_tactivate_us /
				      gran_to_us_table[peer_granularity - 1];
		new_peer_pa_tactivate++;
		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  new_peer_pa_tactivate);
	}

out:
	return ret;
}
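
/*
 * Worked example (illustration only): with gran_to_us_table[] above, a host
 * granularity of 3 (8 us units) and PA_TACTIVATE of 8 gives 64 us, while a
 * device granularity of 2 (4 us units) and PA_TACTIVATE of 16 also gives
 * 64 us; since host >= device, the peer value is bumped to 64 / 4 + 1 = 17
 * units, i.e. 68 us, keeping the device strictly above the host.
 */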
static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
{
	ufshcd_vops_apply_dev_quirks(hba);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
		/* set 1ms timeout for PA_TACTIVATE */
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);

	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
		ufshcd_quirk_tune_host_pa_tactivate(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
{
	hba->ufs_stats.hibern8_exit_cnt = 0;
	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
	hba->req_abort_count = 0;
}
static int ufshcd_device_geo_params_init(struct ufs_hba *hba)
{
	int err;
	u8 *desc_buf;

	desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0,
				     desc_buf, QUERY_DESC_MAX_SIZE);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Geometry Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1)
		hba->dev_info.max_lu_supported = 32;
	else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0)
		hba->dev_info.max_lu_supported = 8;

out:
	kfree(desc_buf);
	return err;
}
struct ufs_ref_clk {
	unsigned long freq_hz;
	enum ufs_ref_clk_freq val;
};

static const struct ufs_ref_clk ufs_ref_clk_freqs[] = {
	{19200000, REF_CLK_FREQ_19_2_MHZ},
	{26000000, REF_CLK_FREQ_26_MHZ},
	{38400000, REF_CLK_FREQ_38_4_MHZ},
	{52000000, REF_CLK_FREQ_52_MHZ},
	{0, REF_CLK_FREQ_INVAL},
};

static enum ufs_ref_clk_freq
ufs_get_bref_clk_from_hz(unsigned long freq)
{
	int i;

	for (i = 0; ufs_ref_clk_freqs[i].freq_hz; i++)
		if (ufs_ref_clk_freqs[i].freq_hz == freq)
			return ufs_ref_clk_freqs[i].val;

	return REF_CLK_FREQ_INVAL;
}
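
/*
 * Usage sketch (illustrative): ufs_get_bref_clk_from_hz(26000000) returns
 * REF_CLK_FREQ_26_MHZ, while any rate not listed in ufs_ref_clk_freqs[]
 * (for example 24000000) falls through to REF_CLK_FREQ_INVAL, which callers
 * such as ufshcd_parse_dev_ref_clk_freq() report as an invalid ref_clk
 * setting.
 */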
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk)
{
	unsigned long freq;

	freq = clk_get_rate(refclk);

	hba->dev_ref_clk_freq =
		ufs_get_bref_clk_from_hz(freq);

	if (hba->dev_ref_clk_freq == REF_CLK_FREQ_INVAL)
		dev_err(hba->dev,
		"invalid ref_clk setting = %ld\n", freq);
}
static int ufshcd_set_dev_ref_clk(struct ufs_hba *hba)
{
	int err;
	u32 ref_clk;
	u32 freq = hba->dev_ref_clk_freq;

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &ref_clk);

	if (err) {
		dev_err(hba->dev, "failed reading bRefClkFreq. err = %d\n",
			err);
		goto out;
	}

	if (ref_clk == freq)
		goto out; /* nothing to update */

	err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_REF_CLK_FREQ, 0, 0, &freq);

	if (err) {
		dev_err(hba->dev, "bRefClkFreq setting to %lu Hz failed\n",
			ufs_ref_clk_freqs[freq].freq_hz);
		goto out;
	}

	dev_dbg(hba->dev, "bRefClkFreq setting to %lu Hz succeeded\n",
			ufs_ref_clk_freqs[freq].freq_hz);

out:
	return err;
}
static int ufshcd_device_params_init(struct ufs_hba *hba)
{
	bool flag;
	int ret;

	/* Init UFS geometry descriptor related parameters */
	ret = ufshcd_device_geo_params_init(hba);
	if (ret)
		goto out;

	/* Check and apply UFS device quirks */
	ret = ufs_get_device_desc(hba);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);
		goto out;
	}

	ufshcd_set_rtt(hba);

	ufshcd_get_ref_clk_gating_wait(hba);

	if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
			QUERY_FLAG_IDN_PWR_ON_WPE, 0, &flag))
		hba->dev_info.f_power_on_wp_en = flag;

	/* Probe maximum power mode co-supported by both UFS host and device */
	if (ufshcd_get_max_pwr_mode(hba))
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
out:
	return ret;
}
static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
{
	int err;
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	struct ufs_dev_info *dev_info = &hba->dev_info;
	struct utp_upiu_query_v4_0 *upiu_data;

	if (dev_info->wspecversion < 0x400)
		return;

	ufshcd_dev_man_lock(hba);

	ufshcd_init_query(hba, &request, &response,
			  UPIU_QUERY_OPCODE_WRITE_ATTR,
			  QUERY_ATTR_IDN_TIMESTAMP, 0, 0);

	request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;

	upiu_data = (struct utp_upiu_query_v4_0 *)&request->upiu_req;

	put_unaligned_be64(ktime_get_real_ns(), &upiu_data->osf3);

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err)
		dev_err(hba->dev, "%s: failed to set timestamp %d\n",
			__func__, err);

	ufshcd_dev_man_unlock(hba);
}
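
/*
 * Note (illustrative): the timestamp written above is the raw value of
 * ktime_get_real_ns(), i.e. nanoseconds since the Unix epoch, placed big
 * endian starting at the osf3 field of the WRITE ATTRIBUTE request; a
 * wall-clock time of 2024-01-01 00:00:00 UTC would therefore be sent as
 * 1704067200 * 10^9 ns.
 */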
/**
 * ufshcd_add_lus - probe and add UFS logical units
 * @hba: per-adapter instance
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_add_lus(struct ufs_hba *hba)
{
	int ret;

	/* Add required well known logical units to scsi mid layer */
	ret = ufshcd_scsi_add_wlus(hba);
	if (ret)
		goto out;

	/* Initialize devfreq after UFS device is detected */
	if (ufshcd_is_clkscaling_supported(hba)) {
		memcpy(&hba->clk_scaling.saved_pwr_info,
			&hba->pwr_info,
			sizeof(struct ufs_pa_layer_attr));
		hba->clk_scaling.is_allowed = true;

		ret = ufshcd_devfreq_init(hba);
		if (ret)
			goto out;

		hba->clk_scaling.is_enabled = true;
		ufshcd_init_clk_scaling_sysfs(hba);
	}

	/*
	 * The RTC update code accesses the hba->ufs_device_wlun->sdev_gendev
	 * pointer and hence must only be started after the WLUN pointer has
	 * been initialized by ufshcd_scsi_add_wlus().
	 */
	schedule_delayed_work(&hba->ufs_rtc_update_work,
			      msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS));

	ufs_bsg_probe(hba);
	scsi_scan_host(hba->host);

out:
	return ret;
}
/* SDB - Single Doorbell */
static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs)
{
	size_t ucdl_size, utrdl_size;

	ucdl_size = ufshcd_get_ucd_size(hba) * nutrs;
	dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr,
			   hba->ucdl_dma_addr);

	utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs;
	dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr,
			   hba->utrdl_dma_addr);

	devm_kfree(hba->dev, hba->lrb);
}
static int ufshcd_alloc_mcq(struct ufs_hba *hba)
{
	int ret;
	int old_nutrs = hba->nutrs;

	ret = ufshcd_mcq_decide_queue_depth(hba);
	if (ret < 0)
		return ret;

	hba->nutrs = ret;
	ret = ufshcd_mcq_init(hba);
	if (ret)
		goto err;

	/*
	 * Previously allocated memory for nutrs may not be enough in MCQ mode.
	 * Number of supported tags in MCQ mode may be larger than SDB mode.
	 */
	if (hba->nutrs != old_nutrs) {
		ufshcd_release_sdb_queue(hba, old_nutrs);
		ret = ufshcd_memory_alloc(hba);
		if (ret)
			goto err;
		ufshcd_host_memory_configure(hba);
	}

	ret = ufshcd_mcq_memory_alloc(hba);
	if (ret)
		goto err;

	hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
	hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED;

	return 0;
err:
	hba->nutrs = old_nutrs;
	return ret;
}
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
	int ret;
	u32 intrs;

	ret = ufshcd_mcq_vops_config_esi(hba);
	dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");

	intrs = UFSHCD_ENABLE_MCQ_INTRS;
	if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
		intrs &= ~MCQ_CQ_EVENT_STATUS;
	ufshcd_enable_intr(hba, intrs);
	ufshcd_mcq_make_queues_operational(hba);
	ufshcd_mcq_config_mac(hba, hba->nutrs);

	dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n",
		 hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT],
		 hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL],
		 hba->nutrs);
}
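
/*
 * Illustration (not part of the driver): with UFSHCD_QUIRK_MCQ_BROKEN_INTR
 * set, the interrupt set enabled above is UFSHCD_ENABLE_MCQ_INTRS with
 * MCQ_CQ_EVENT_STATUS masked out, so command completions are expected to be
 * signalled through another path (for example per-queue ESI interrupts) on
 * such controllers.
 */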
static int ufshcd_post_device_init(struct ufs_hba *hba)
{
	int ret;

	ufshcd_tune_unipro_params(hba);

	/* UFS device is also active now */
	ufshcd_set_ufs_dev_active(hba);
	ufshcd_force_reset_auto_bkops(hba);

	ufshcd_set_timestamp_attr(hba);

	if (!hba->max_pwr_info.is_valid)
		return 0;

	/*
	 * Set the right value to bRefClkFreq before attempting to
	 * switch to HS gears.
	 */
	if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
		ufshcd_set_dev_ref_clk(hba);
	/* Gear up to HS gear. */
	ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
	if (ret) {
		dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
			__func__, ret);
		return ret;
	}

	return 0;
}
static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params)
{
	int ret;

	WARN_ON_ONCE(!hba->scsi_host_added);

	hba->ufshcd_state = UFSHCD_STATE_RESET;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION)
		return ret;

	/* Debug counters initialization */
	ufshcd_clear_dbg_ufs_stats(hba);

	/* UniPro link is active now */
	ufshcd_set_link_active(hba);

	/* Reconfigure MCQ upon reset */
	if (hba->mcq_enabled && !init_dev_params) {
		ufshcd_config_mcq(hba);
		ufshcd_mcq_enable(hba);
	}

	/* Verify device initialization by sending NOP OUT UPIU */
	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	/* Initiate UFS initialization, and wait until completion */
	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/*
	 * Initialize UFS device parameters used by driver, these
	 * parameters are associated with UFS descriptors.
	 */
	if (init_dev_params) {
		ret = ufshcd_device_params_init(hba);
		if (ret)
			return ret;
		if (is_mcq_supported(hba) &&
		    hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) {
			ufshcd_config_mcq(hba);
			ufshcd_mcq_enable(hba);
		}
	}

	return ufshcd_post_device_init(hba);
}
/**
 * ufshcd_probe_hba - probe hba to detect device and initialize it
 * @hba: per-adapter instance
 * @init_dev_params: whether or not to call ufshcd_device_params_init().
 *
 * Execute link-startup and verify device initialization
 *
 * Return: 0 upon success; < 0 upon failure.
 */
static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params)
{
	int ret;

	if (!hba->pm_op_in_progress &&
	    (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH)) {
		/* Reset the device and controller before doing reinit */
		ufshcd_device_reset(hba);
		ufs_put_device_desc(hba);
		ufshcd_hba_stop(hba);
		ufshcd_vops_reinit_notify(hba);
		ret = ufshcd_hba_enable(hba);
		if (ret) {
			dev_err(hba->dev, "Host controller enable failed\n");
			ufshcd_print_evt_hist(hba);
			ufshcd_print_host_state(hba);
			return ret;
		}

		/* Reinit the device */
		ret = ufshcd_device_init(hba, init_dev_params);
		if (ret)
			return ret;
	}

	ufshcd_print_pwr_info(hba);

	/*
	 * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec)
	 * and for removable UFS card as well, hence always set the parameter.
	 * Note: Error handler may issue the device reset hence resetting
	 * bActiveICCLevel as well so it is always safe to set this here.
	 */
	ufshcd_set_active_icc_lvl(hba);

	/* Enable UFS Write Booster if supported */
	ufshcd_configure_wb(hba);

	if (hba->ee_usr_mask)
		ufshcd_write_ee_control(hba);
	ufshcd_configure_auto_hibern8(hba);

	return 0;
}
/**
 * ufshcd_async_scan - asynchronous execution for probing hba
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	ktime_t probe_start;
	int ret;

	down(&hba->host_sem);
	/* Initialize hba, detect and initialize UFS device */
	probe_start = ktime_get();
	ret = ufshcd_probe_hba(hba, true);
	ufshcd_process_probe_result(hba, probe_start, ret);
	up(&hba->host_sem);
	if (ret)
		goto out;

	/* Probe and add UFS logical units */
	ret = ufshcd_add_lus(hba);

out:
	pm_runtime_put_sync(hba->dev);

	if (ret)
		dev_err(hba->dev, "%s failed: %d\n", __func__, ret);
}
static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
{
	struct ufs_hba *hba = shost_priv(scmd->device->host);

	if (!hba->system_suspending) {
		/* Activate the error handler in the SCSI core. */
		return SCSI_EH_NOT_HANDLED;
	}

	/*
	 * If we get here we know that no TMFs are outstanding and also that
	 * the only pending command is a START STOP UNIT command. Handle the
	 * timeout of that command directly to prevent a deadlock between
	 * ufshcd_set_dev_pwr_mode() and ufshcd_err_handler().
	 */
	ufshcd_link_recovery(hba);
	dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
		 __func__, hba->outstanding_tasks);

	return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
	&ufs_sysfs_unit_descriptor_group,
	&ufs_sysfs_lun_attributes_group,
	NULL,
};

static struct ufs_hba_variant_params ufs_hba_vps = {
	.hba_enable_delay_us		= 1000,
	.wb_flush_threshold		= UFS_WB_BUF_REMAIN_PERCENT(40),
	.devfreq_profile.polling_ms	= 100,
	.devfreq_profile.target		= ufshcd_devfreq_target,
	.devfreq_profile.get_dev_status	= ufshcd_devfreq_get_dev_status,
	.ondemand_data.upthreshold	= 70,
	.ondemand_data.downdifferential	= 5,
};

static const struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.map_queues		= ufshcd_map_queues,
	.queuecommand		= ufshcd_queuecommand,
	.mq_poll		= ufshcd_poll,
	.slave_alloc		= ufshcd_slave_alloc,
	.device_configure	= ufshcd_device_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
	.eh_timed_out		= ufshcd_eh_timed_out,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_segment_size	= PRDT_DATA_BYTE_COUNT_MAX,
	.max_sectors		= SZ_1M / SECTOR_SIZE,
	.max_host_blocked	= 1,
	.track_queue_depth	= 1,
	.skip_settle_delay	= 1,
	.sdev_groups		= ufshcd_driver_groups,
};
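
/*
 * Illustration (not part of the driver): .max_sectors above evaluates to
 * SZ_1M / SECTOR_SIZE = 1048576 / 512 = 2048 sectors, i.e. the SCSI layer
 * caps a single request at 1 MiB of data for this host template.
 */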
8951 static int ufshcd_config_vreg_load(struct device
*dev
, struct ufs_vreg
*vreg
,
8960 * "set_load" operation shall be required on those regulators
8961 * which specifically configured current limitation. Otherwise
8962 * zero max_uA may cause unexpected behavior when regulator is
8963 * enabled or set as high power mode.
8968 ret
= regulator_set_load(vreg
->reg
, ua
);
8970 dev_err(dev
, "%s: %s set load (ua=%d) failed, err=%d\n",
8971 __func__
, vreg
->name
, ua
, ret
);
8977 static inline int ufshcd_config_vreg_lpm(struct ufs_hba
*hba
,
8978 struct ufs_vreg
*vreg
)
8980 return ufshcd_config_vreg_load(hba
->dev
, vreg
, UFS_VREG_LPM_LOAD_UA
);
8983 static inline int ufshcd_config_vreg_hpm(struct ufs_hba
*hba
,
8984 struct ufs_vreg
*vreg
)
8989 return ufshcd_config_vreg_load(hba
->dev
, vreg
, vreg
->max_uA
);
8992 static int ufshcd_config_vreg(struct device
*dev
,
8993 struct ufs_vreg
*vreg
, bool on
)
8995 if (regulator_count_voltages(vreg
->reg
) <= 0)
8998 return ufshcd_config_vreg_load(dev
, vreg
, on
? vreg
->max_uA
: 0);
9001 static int ufshcd_enable_vreg(struct device
*dev
, struct ufs_vreg
*vreg
)
9005 if (!vreg
|| vreg
->enabled
)
9008 ret
= ufshcd_config_vreg(dev
, vreg
, true);
9010 ret
= regulator_enable(vreg
->reg
);
9013 vreg
->enabled
= true;
9015 dev_err(dev
, "%s: %s enable failed, err=%d\n",
9016 __func__
, vreg
->name
, ret
);
9021 static int ufshcd_disable_vreg(struct device
*dev
, struct ufs_vreg
*vreg
)
9025 if (!vreg
|| !vreg
->enabled
|| vreg
->always_on
)
9028 ret
= regulator_disable(vreg
->reg
);
9031 /* ignore errors on applying disable config */
9032 ufshcd_config_vreg(dev
, vreg
, false);
9033 vreg
->enabled
= false;
9035 dev_err(dev
, "%s: %s disable failed, err=%d\n",
9036 __func__
, vreg
->name
, ret
);
9042 static int ufshcd_setup_vreg(struct ufs_hba
*hba
, bool on
)
9045 struct device
*dev
= hba
->dev
;
9046 struct ufs_vreg_info
*info
= &hba
->vreg_info
;
9048 ret
= ufshcd_toggle_vreg(dev
, info
->vcc
, on
);
9052 ret
= ufshcd_toggle_vreg(dev
, info
->vccq
, on
);
9056 ret
= ufshcd_toggle_vreg(dev
, info
->vccq2
, on
);
9060 ufshcd_toggle_vreg(dev
, info
->vccq2
, false);
9061 ufshcd_toggle_vreg(dev
, info
->vccq
, false);
9062 ufshcd_toggle_vreg(dev
, info
->vcc
, false);
9067 static int ufshcd_setup_hba_vreg(struct ufs_hba
*hba
, bool on
)
9069 struct ufs_vreg_info
*info
= &hba
->vreg_info
;
9071 return ufshcd_toggle_vreg(hba
->dev
, info
->vdd_hba
, on
);
9074 int ufshcd_get_vreg(struct device
*dev
, struct ufs_vreg
*vreg
)
9081 vreg
->reg
= devm_regulator_get(dev
, vreg
->name
);
9082 if (IS_ERR(vreg
->reg
)) {
9083 ret
= PTR_ERR(vreg
->reg
);
9084 dev_err(dev
, "%s: %s get failed, err=%d\n",
9085 __func__
, vreg
->name
, ret
);
9090 EXPORT_SYMBOL_GPL(ufshcd_get_vreg
);
9092 static int ufshcd_init_vreg(struct ufs_hba
*hba
)
9095 struct device
*dev
= hba
->dev
;
9096 struct ufs_vreg_info
*info
= &hba
->vreg_info
;
9098 ret
= ufshcd_get_vreg(dev
, info
->vcc
);
9102 ret
= ufshcd_get_vreg(dev
, info
->vccq
);
9104 ret
= ufshcd_get_vreg(dev
, info
->vccq2
);
9109 static int ufshcd_init_hba_vreg(struct ufs_hba
*hba
)
9111 struct ufs_vreg_info
*info
= &hba
->vreg_info
;
9113 return ufshcd_get_vreg(hba
->dev
, info
->vdd_hba
);
9116 static int ufshcd_setup_clocks(struct ufs_hba
*hba
, bool on
)
9119 struct ufs_clk_info
*clki
;
9120 struct list_head
*head
= &hba
->clk_list_head
;
9121 unsigned long flags
;
9122 ktime_t start
= ktime_get();
9123 bool clk_state_changed
= false;
9125 if (list_empty(head
))
9128 ret
= ufshcd_vops_setup_clocks(hba
, on
, PRE_CHANGE
);
9132 list_for_each_entry(clki
, head
, list
) {
9133 if (!IS_ERR_OR_NULL(clki
->clk
)) {
9135 * Don't disable clocks which are needed
9136 * to keep the link active.
9138 if (ufshcd_is_link_active(hba
) &&
9139 clki
->keep_link_active
)
9142 clk_state_changed
= on
^ clki
->enabled
;
9143 if (on
&& !clki
->enabled
) {
9144 ret
= clk_prepare_enable(clki
->clk
);
9146 dev_err(hba
->dev
, "%s: %s prepare enable failed, %d\n",
9147 __func__
, clki
->name
, ret
);
9150 } else if (!on
&& clki
->enabled
) {
9151 clk_disable_unprepare(clki
->clk
);
9154 dev_dbg(hba
->dev
, "%s: clk: %s %sabled\n", __func__
,
9155 clki
->name
, on
? "en" : "dis");
9159 ret
= ufshcd_vops_setup_clocks(hba
, on
, POST_CHANGE
);
9163 if (!ufshcd_is_clkscaling_supported(hba
))
9164 ufshcd_pm_qos_update(hba
, on
);
9167 list_for_each_entry(clki
, head
, list
) {
9168 if (!IS_ERR_OR_NULL(clki
->clk
) && clki
->enabled
)
9169 clk_disable_unprepare(clki
->clk
);
9171 } else if (!ret
&& on
) {
9172 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
9173 hba
->clk_gating
.state
= CLKS_ON
;
9174 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
9175 hba
->clk_gating
.state
);
9176 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
9179 if (clk_state_changed
)
9180 trace_ufshcd_profile_clk_gating(dev_name(hba
->dev
),
9181 (on
? "on" : "off"),
9182 ktime_to_us(ktime_sub(ktime_get(), start
)), ret
);
9186 static enum ufs_ref_clk_freq
ufshcd_parse_ref_clk_property(struct ufs_hba
*hba
)
9189 int ret
= device_property_read_u32(hba
->dev
, "ref-clk-freq", &freq
);
9192 dev_dbg(hba
->dev
, "Cannot query 'ref-clk-freq' property = %d", ret
);
9193 return REF_CLK_FREQ_INVAL
;
9196 return ufs_get_bref_clk_from_hz(freq
);
9199 static int ufshcd_init_clocks(struct ufs_hba
*hba
)
9202 struct ufs_clk_info
*clki
;
9203 struct device
*dev
= hba
->dev
;
9204 struct list_head
*head
= &hba
->clk_list_head
;
9206 if (list_empty(head
))
9209 list_for_each_entry(clki
, head
, list
) {
9213 clki
->clk
= devm_clk_get(dev
, clki
->name
);
9214 if (IS_ERR(clki
->clk
)) {
9215 ret
= PTR_ERR(clki
->clk
);
9216 dev_err(dev
, "%s: %s clk get failed, %d\n",
9217 __func__
, clki
->name
, ret
);
9222 * Parse device ref clk freq as per device tree "ref_clk".
9223 * Default dev_ref_clk_freq is set to REF_CLK_FREQ_INVAL
9224 * in ufshcd_alloc_host().
9226 if (!strcmp(clki
->name
, "ref_clk"))
9227 ufshcd_parse_dev_ref_clk_freq(hba
, clki
->clk
);
9229 if (clki
->max_freq
) {
9230 ret
= clk_set_rate(clki
->clk
, clki
->max_freq
);
9232 dev_err(hba
->dev
, "%s: %s clk set rate(%dHz) failed, %d\n",
9233 __func__
, clki
->name
,
9234 clki
->max_freq
, ret
);
9237 clki
->curr_freq
= clki
->max_freq
;
9239 dev_dbg(dev
, "%s: clk: %s, rate: %lu\n", __func__
,
9240 clki
->name
, clk_get_rate(clki
->clk
));
9243 /* Set Max. frequency for all clocks */
9244 if (hba
->use_pm_opp
) {
9245 ret
= ufshcd_opp_set_rate(hba
, ULONG_MAX
);
9247 dev_err(hba
->dev
, "%s: failed to set OPP: %d", __func__
,
9257 static int ufshcd_variant_hba_init(struct ufs_hba
*hba
)
9264 err
= ufshcd_vops_init(hba
);
9266 dev_err_probe(hba
->dev
, err
,
9267 "%s: variant %s init failed with err %d\n",
9268 __func__
, ufshcd_get_var_name(hba
), err
);
9273 static void ufshcd_variant_hba_exit(struct ufs_hba
*hba
)
9278 ufshcd_vops_exit(hba
);
9281 static int ufshcd_hba_init(struct ufs_hba
*hba
)
9286 * Handle host controller power separately from the UFS device power
9287 * rails as it will help controlling the UFS host controller power
9288 * collapse easily which is different than UFS device power collapse.
9289 * Also, enable the host controller power before we go ahead with rest
9290 * of the initialization here.
9292 err
= ufshcd_init_hba_vreg(hba
);
9296 err
= ufshcd_setup_hba_vreg(hba
, true);
9300 err
= ufshcd_init_clocks(hba
);
9302 goto out_disable_hba_vreg
;
9304 if (hba
->dev_ref_clk_freq
== REF_CLK_FREQ_INVAL
)
9305 hba
->dev_ref_clk_freq
= ufshcd_parse_ref_clk_property(hba
);
9307 err
= ufshcd_setup_clocks(hba
, true);
9309 goto out_disable_hba_vreg
;
9311 err
= ufshcd_init_vreg(hba
);
9313 goto out_disable_clks
;
9315 err
= ufshcd_setup_vreg(hba
, true);
9317 goto out_disable_clks
;
9319 err
= ufshcd_variant_hba_init(hba
);
9321 goto out_disable_vreg
;
9323 ufs_debugfs_hba_init(hba
);
9324 ufs_fault_inject_hba_init(hba
);
9326 hba
->is_powered
= true;
9330 ufshcd_setup_vreg(hba
, false);
9332 ufshcd_setup_clocks(hba
, false);
9333 out_disable_hba_vreg
:
9334 ufshcd_setup_hba_vreg(hba
, false);
9339 static void ufshcd_hba_exit(struct ufs_hba
*hba
)
9341 if (hba
->is_powered
) {
9342 ufshcd_pm_qos_exit(hba
);
9343 ufshcd_exit_clk_scaling(hba
);
9344 ufshcd_exit_clk_gating(hba
);
9346 destroy_workqueue(hba
->eh_wq
);
9347 ufs_debugfs_hba_exit(hba
);
9348 ufshcd_variant_hba_exit(hba
);
9349 ufshcd_setup_vreg(hba
, false);
9350 ufshcd_setup_clocks(hba
, false);
9351 ufshcd_setup_hba_vreg(hba
, false);
9352 hba
->is_powered
= false;
9353 ufs_put_device_desc(hba
);
9357 static int ufshcd_execute_start_stop(struct scsi_device
*sdev
,
9358 enum ufs_dev_pwr_mode pwr_mode
,
9359 struct scsi_sense_hdr
*sshdr
)
9361 const unsigned char cdb
[6] = { START_STOP
, 0, 0, 0, pwr_mode
<< 4, 0 };
9362 struct scsi_failure failure_defs
[] = {
9365 .result
= SCMD_FAILURE_RESULT_ANY
,
9368 struct scsi_failures failures
= {
9369 .failure_definitions
= failure_defs
,
9371 const struct scsi_exec_args args
= {
9372 .failures
= &failures
,
9374 .req_flags
= BLK_MQ_REQ_PM
,
9375 .scmd_flags
= SCMD_FAIL_IF_RECOVERING
,
9378 return scsi_execute_cmd(sdev
, cdb
, REQ_OP_DRV_IN
, /*buffer=*/NULL
,
9379 /*bufflen=*/0, /*timeout=*/10 * HZ
, /*retries=*/0,
9384 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
9386 * @hba: per adapter instance
9387 * @pwr_mode: device power mode to set
9389 * Return: 0 if requested power mode is set successfully;
9390 * < 0 if failed to set the requested power mode.
9392 static int ufshcd_set_dev_pwr_mode(struct ufs_hba
*hba
,
9393 enum ufs_dev_pwr_mode pwr_mode
)
9395 struct scsi_sense_hdr sshdr
;
9396 struct scsi_device
*sdp
;
9397 unsigned long flags
;
9400 spin_lock_irqsave(hba
->host
->host_lock
, flags
);
9401 sdp
= hba
->ufs_device_wlun
;
9402 if (sdp
&& scsi_device_online(sdp
))
9403 ret
= scsi_device_get(sdp
);
9406 spin_unlock_irqrestore(hba
->host
->host_lock
, flags
);
9412 * If scsi commands fail, the scsi mid-layer schedules scsi error-
9413 * handling, which would wait for host to be resumed. Since we know
9414 * we are functional while we are here, skip host resume in error
9417 hba
->host
->eh_noresume
= 1;
9420 * Current function would be generally called from the power management
9421 * callbacks hence set the RQF_PM flag so that it doesn't resume the
9422 * already suspended childs.
9424 ret
= ufshcd_execute_start_stop(sdp
, pwr_mode
, &sshdr
);
9426 sdev_printk(KERN_WARNING
, sdp
,
9427 "START_STOP failed for power mode: %d, result %x\n",
9430 if (scsi_sense_valid(&sshdr
))
9431 scsi_print_sense_hdr(sdp
, NULL
, &sshdr
);
9435 hba
->curr_dev_pwr_mode
= pwr_mode
;
9438 scsi_device_put(sdp
);
9439 hba
->host
->eh_noresume
= 0;
9443 static int ufshcd_link_state_transition(struct ufs_hba
*hba
,
9444 enum uic_link_state req_link_state
,
9445 bool check_for_bkops
)
9449 if (req_link_state
== hba
->uic_link_state
)
9452 if (req_link_state
== UIC_LINK_HIBERN8_STATE
) {
9453 ret
= ufshcd_uic_hibern8_enter(hba
);
9455 ufshcd_set_link_hibern8(hba
);
9457 dev_err(hba
->dev
, "%s: hibern8 enter failed %d\n",
9463 * If autobkops is enabled, link can't be turned off because
9464 * turning off the link would also turn off the device, except in the
9465 * case of DeepSleep where the device is expected to remain powered.
9467 else if ((req_link_state
== UIC_LINK_OFF_STATE
) &&
9468 (!check_for_bkops
|| !hba
->auto_bkops_enabled
)) {
9470 * Let's make sure that link is in low power mode, we are doing
9471 * this currently by putting the link in Hibern8. Otherway to
9472 * put the link in low power mode is to send the DME end point
9473 * to device and then send the DME reset command to local
9474 * unipro. But putting the link in hibern8 is much faster.
9476 * Note also that putting the link in Hibern8 is a requirement
9477 * for entering DeepSleep.
9479 ret
= ufshcd_uic_hibern8_enter(hba
);
9481 dev_err(hba
->dev
, "%s: hibern8 enter failed %d\n",
9486 * Change controller state to "reset state" which
9487 * should also put the link in off/reset state
9489 ufshcd_hba_stop(hba
);
9491 * TODO: Check if we need any delay to make sure that
9492 * controller is reset
9494 ufshcd_set_link_off(hba
);
9501 static void ufshcd_vreg_set_lpm(struct ufs_hba
*hba
)
9503 bool vcc_off
= false;
9506 * It seems some UFS devices may keep drawing more than sleep current
9507 * (atleast for 500us) from UFS rails (especially from VCCQ rail).
9508 * To avoid this situation, add 2ms delay before putting these UFS
9509 * rails in LPM mode.
9511 if (!ufshcd_is_link_active(hba
) &&
9512 hba
->dev_quirks
& UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM
)
9513 usleep_range(2000, 2100);
9516 * If UFS device is either in UFS_Sleep turn off VCC rail to save some
9519 * If UFS device and link is in OFF state, all power supplies (VCC,
9520 * VCCQ, VCCQ2) can be turned off if power on write protect is not
9521 * required. If UFS link is inactive (Hibern8 or OFF state) and device
9522 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
9524 * Ignore the error returned by ufshcd_toggle_vreg() as device is anyway
9525 * in low power state which would save some power.
9527 * If Write Booster is enabled and the device needs to flush the WB
9528 * buffer OR if bkops status is urgent for WB, keep Vcc on.
9530 if (ufshcd_is_ufs_dev_poweroff(hba
) && ufshcd_is_link_off(hba
) &&
9531 !hba
->dev_info
.is_lu_power_on_wp
) {
9532 ufshcd_setup_vreg(hba
, false);
9534 } else if (!ufshcd_is_ufs_dev_active(hba
)) {
9535 ufshcd_toggle_vreg(hba
->dev
, hba
->vreg_info
.vcc
, false);
9537 if (ufshcd_is_link_hibern8(hba
) || ufshcd_is_link_off(hba
)) {
9538 ufshcd_config_vreg_lpm(hba
, hba
->vreg_info
.vccq
);
9539 ufshcd_config_vreg_lpm(hba
, hba
->vreg_info
.vccq2
);
9544 * Some UFS devices require delay after VCC power rail is turned-off.
9546 if (vcc_off
&& hba
->vreg_info
.vcc
&&
9547 hba
->dev_quirks
& UFS_DEVICE_QUIRK_DELAY_AFTER_LPM
)
9548 usleep_range(5000, 5100);
9552 static int ufshcd_vreg_set_hpm(struct ufs_hba
*hba
)
9556 if (ufshcd_is_ufs_dev_poweroff(hba
) && ufshcd_is_link_off(hba
) &&
9557 !hba
->dev_info
.is_lu_power_on_wp
) {
9558 ret
= ufshcd_setup_vreg(hba
, true);
9559 } else if (!ufshcd_is_ufs_dev_active(hba
)) {
9560 if (!ufshcd_is_link_active(hba
)) {
9561 ret
= ufshcd_config_vreg_hpm(hba
, hba
->vreg_info
.vccq
);
9564 ret
= ufshcd_config_vreg_hpm(hba
, hba
->vreg_info
.vccq2
);
9568 ret
= ufshcd_toggle_vreg(hba
->dev
, hba
->vreg_info
.vcc
, true);
9573 ufshcd_config_vreg_lpm(hba
, hba
->vreg_info
.vccq
);
9575 ufshcd_toggle_vreg(hba
->dev
, hba
->vreg_info
.vcc
, false);
9579 #endif /* CONFIG_PM */
9581 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba
*hba
)
9583 if (ufshcd_is_link_off(hba
) || ufshcd_can_aggressive_pc(hba
))
9584 ufshcd_setup_hba_vreg(hba
, false);
9587 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba
*hba
)
9589 if (ufshcd_is_link_off(hba
) || ufshcd_can_aggressive_pc(hba
))
9590 ufshcd_setup_hba_vreg(hba
, true);
9593 static int __ufshcd_wl_suspend(struct ufs_hba
*hba
, enum ufs_pm_op pm_op
)
9596 bool check_for_bkops
;
9597 enum ufs_pm_level pm_lvl
;
9598 enum ufs_dev_pwr_mode req_dev_pwr_mode
;
9599 enum uic_link_state req_link_state
;
9601 hba
->pm_op_in_progress
= true;
9602 if (pm_op
!= UFS_SHUTDOWN_PM
) {
9603 pm_lvl
= pm_op
== UFS_RUNTIME_PM
?
9604 hba
->rpm_lvl
: hba
->spm_lvl
;
9605 req_dev_pwr_mode
= ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl
);
9606 req_link_state
= ufs_get_pm_lvl_to_link_pwr_state(pm_lvl
);
9608 req_dev_pwr_mode
= UFS_POWERDOWN_PWR_MODE
;
9609 req_link_state
= UIC_LINK_OFF_STATE
;
9613 * If we can't transition into any of the low power modes
9614 * just gate the clocks.
9617 hba
->clk_gating
.is_suspended
= true;
9619 if (ufshcd_is_clkscaling_supported(hba
))
9620 ufshcd_clk_scaling_suspend(hba
, true);
9622 if (req_dev_pwr_mode
== UFS_ACTIVE_PWR_MODE
&&
9623 req_link_state
== UIC_LINK_ACTIVE_STATE
) {
9627 if ((req_dev_pwr_mode
== hba
->curr_dev_pwr_mode
) &&
9628 (req_link_state
== hba
->uic_link_state
))
9629 goto enable_scaling
;
9631 /* UFS device & link must be active before we enter in this function */
9632 if (!ufshcd_is_ufs_dev_active(hba
) || !ufshcd_is_link_active(hba
)) {
9633 /* Wait err handler finish or trigger err recovery */
9634 if (!ufshcd_eh_in_progress(hba
))
9635 ufshcd_force_error_recovery(hba
);
9637 goto enable_scaling
;
9640 if (pm_op
== UFS_RUNTIME_PM
) {
9641 if (ufshcd_can_autobkops_during_suspend(hba
)) {
9643 * The device is idle with no requests in the queue,
9644 * allow background operations if bkops status shows
9645 * that performance might be impacted.
9647 ret
= ufshcd_bkops_ctrl(hba
);
9650 * If return err in suspend flow, IO will hang.
9651 * Trigger error handler and break suspend for
9654 ufshcd_force_error_recovery(hba
);
9656 goto enable_scaling
;
9659 /* make sure that auto bkops is disabled */
9660 ufshcd_disable_auto_bkops(hba
);
9663 * If device needs to do BKOP or WB buffer flush during
9664 * Hibern8, keep device power mode as "active power mode"
9667 hba
->dev_info
.b_rpm_dev_flush_capable
=
9668 hba
->auto_bkops_enabled
||
9669 (((req_link_state
== UIC_LINK_HIBERN8_STATE
) ||
9670 ((req_link_state
== UIC_LINK_ACTIVE_STATE
) &&
9671 ufshcd_is_auto_hibern8_enabled(hba
))) &&
9672 ufshcd_wb_need_flush(hba
));
9675 flush_work(&hba
->eeh_work
);
9677 ret
= ufshcd_vops_suspend(hba
, pm_op
, PRE_CHANGE
);
9679 goto enable_scaling
;
9681 if (req_dev_pwr_mode
!= hba
->curr_dev_pwr_mode
) {
9682 if (pm_op
!= UFS_RUNTIME_PM
)
9683 /* ensure that bkops is disabled */
9684 ufshcd_disable_auto_bkops(hba
);
9686 if (!hba
->dev_info
.b_rpm_dev_flush_capable
) {
9687 ret
= ufshcd_set_dev_pwr_mode(hba
, req_dev_pwr_mode
);
9688 if (ret
&& pm_op
!= UFS_SHUTDOWN_PM
) {
9690 * If return err in suspend flow, IO will hang.
9691 * Trigger error handler and break suspend for
9694 ufshcd_force_error_recovery(hba
);
9698 goto enable_scaling
;
9703 * In the case of DeepSleep, the device is expected to remain powered
9704 * with the link off, so do not check for bkops.
9706 check_for_bkops
= !ufshcd_is_ufs_dev_deepsleep(hba
);
9707 ret
= ufshcd_link_state_transition(hba
, req_link_state
, check_for_bkops
);
9708 if (ret
&& pm_op
!= UFS_SHUTDOWN_PM
) {
9710 * If return err in suspend flow, IO will hang.
9711 * Trigger error handler and break suspend for
9714 ufshcd_force_error_recovery(hba
);
9718 goto set_dev_active
;
9722 * Call vendor specific suspend callback. As these callbacks may access
9723 * vendor specific host controller register space call them before the
9724 * host clocks are ON.
9726 ret
= ufshcd_vops_suspend(hba
, pm_op
, POST_CHANGE
);
9728 goto set_link_active
;
9730 cancel_delayed_work_sync(&hba
->ufs_rtc_update_work
);
9735 * Device hardware reset is required to exit DeepSleep. Also, for
9736 * DeepSleep, the link is off so host reset and restore will be done
9739 if (ufshcd_is_ufs_dev_deepsleep(hba
)) {
9740 ufshcd_device_reset(hba
);
9741 WARN_ON(!ufshcd_is_link_off(hba
));
9743 if (ufshcd_is_link_hibern8(hba
) && !ufshcd_uic_hibern8_exit(hba
))
9744 ufshcd_set_link_active(hba
);
9745 else if (ufshcd_is_link_off(hba
))
9746 ufshcd_host_reset_and_restore(hba
);
9748 /* Can also get here needing to exit DeepSleep */
9749 if (ufshcd_is_ufs_dev_deepsleep(hba
)) {
9750 ufshcd_device_reset(hba
);
9751 ufshcd_host_reset_and_restore(hba
);
9753 if (!ufshcd_set_dev_pwr_mode(hba
, UFS_ACTIVE_PWR_MODE
))
9754 ufshcd_disable_auto_bkops(hba
);
9756 if (ufshcd_is_clkscaling_supported(hba
))
9757 ufshcd_clk_scaling_suspend(hba
, false);
9759 hba
->dev_info
.b_rpm_dev_flush_capable
= false;
9761 if (hba
->dev_info
.b_rpm_dev_flush_capable
) {
9762 schedule_delayed_work(&hba
->rpm_dev_flush_recheck_work
,
9763 msecs_to_jiffies(RPM_DEV_FLUSH_RECHECK_WORK_DELAY_MS
));
9767 ufshcd_update_evt_hist(hba
, UFS_EVT_WL_SUSP_ERR
, (u32
)ret
);
9768 hba
->clk_gating
.is_suspended
= false;
9769 ufshcd_release(hba
);
9771 hba
->pm_op_in_progress
= false;
9776 static int __ufshcd_wl_resume(struct ufs_hba
*hba
, enum ufs_pm_op pm_op
)
9779 enum uic_link_state old_link_state
= hba
->uic_link_state
;
9781 hba
->pm_op_in_progress
= true;
9784 * Call vendor specific resume callback. As these callbacks may access
9785 * vendor specific host controller register space call them when the
9786 * host clocks are ON.
9788 ret
= ufshcd_vops_resume(hba
, pm_op
);
9792 /* For DeepSleep, the only supported option is to have the link off */
9793 WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba
) && !ufshcd_is_link_off(hba
));
9795 if (ufshcd_is_link_hibern8(hba
)) {
9796 ret
= ufshcd_uic_hibern8_exit(hba
);
9798 ufshcd_set_link_active(hba
);
9800 dev_err(hba
->dev
, "%s: hibern8 exit failed %d\n",
9802 goto vendor_suspend
;
9804 } else if (ufshcd_is_link_off(hba
)) {
9806 * A full initialization of the host and the device is
9807 * required since the link was put to off during suspend.
9808 * Note, in the case of DeepSleep, the device will exit
9809 * DeepSleep due to device reset.
9811 ret
= ufshcd_reset_and_restore(hba
);
9813 * ufshcd_reset_and_restore() should have already
9814 * set the link state as active
9816 if (ret
|| !ufshcd_is_link_active(hba
))
9817 goto vendor_suspend
;
9820 if (!ufshcd_is_ufs_dev_active(hba
)) {
9821 ret
= ufshcd_set_dev_pwr_mode(hba
, UFS_ACTIVE_PWR_MODE
);
9823 goto set_old_link_state
;
9824 ufshcd_set_timestamp_attr(hba
);
9825 schedule_delayed_work(&hba
->ufs_rtc_update_work
,
9826 msecs_to_jiffies(UFS_RTC_UPDATE_INTERVAL_MS
));
9829 if (ufshcd_keep_autobkops_enabled_except_suspend(hba
))
9830 ufshcd_enable_auto_bkops(hba
);
9833 * If BKOPs operations are urgently needed at this moment then
9834 * keep auto-bkops enabled or else disable it.
9836 ufshcd_bkops_ctrl(hba
);
9838 if (hba
->ee_usr_mask
)
9839 ufshcd_write_ee_control(hba
);
9841 if (ufshcd_is_clkscaling_supported(hba
))
9842 ufshcd_clk_scaling_suspend(hba
, false);
9844 if (hba
->dev_info
.b_rpm_dev_flush_capable
) {
9845 hba
->dev_info
.b_rpm_dev_flush_capable
= false;
9846 cancel_delayed_work(&hba
->rpm_dev_flush_recheck_work
);
9849 ufshcd_configure_auto_hibern8(hba
);
9854 ufshcd_link_state_transition(hba
, old_link_state
, 0);
9856 ufshcd_vops_suspend(hba
, pm_op
, PRE_CHANGE
);
9857 ufshcd_vops_suspend(hba
, pm_op
, POST_CHANGE
);
9860 ufshcd_update_evt_hist(hba
, UFS_EVT_WL_RES_ERR
, (u32
)ret
);
9861 hba
->clk_gating
.is_suspended
= false;
9862 ufshcd_release(hba
);
9863 hba
->pm_op_in_progress
= false;
9867 static int ufshcd_wl_runtime_suspend(struct device
*dev
)
9869 struct scsi_device
*sdev
= to_scsi_device(dev
);
9870 struct ufs_hba
*hba
;
9872 ktime_t start
= ktime_get();
9874 hba
= shost_priv(sdev
->host
);
9876 ret
= __ufshcd_wl_suspend(hba
, UFS_RUNTIME_PM
);
9878 dev_err(&sdev
->sdev_gendev
, "%s failed: %d\n", __func__
, ret
);
9880 trace_ufshcd_wl_runtime_suspend(dev_name(dev
), ret
,
9881 ktime_to_us(ktime_sub(ktime_get(), start
)),
9882 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
9887 static int ufshcd_wl_runtime_resume(struct device
*dev
)
9889 struct scsi_device
*sdev
= to_scsi_device(dev
);
9890 struct ufs_hba
*hba
;
9892 ktime_t start
= ktime_get();
9894 hba
= shost_priv(sdev
->host
);
9896 ret
= __ufshcd_wl_resume(hba
, UFS_RUNTIME_PM
);
9898 dev_err(&sdev
->sdev_gendev
, "%s failed: %d\n", __func__
, ret
);
9900 trace_ufshcd_wl_runtime_resume(dev_name(dev
), ret
,
9901 ktime_to_us(ktime_sub(ktime_get(), start
)),
9902 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
9908 #ifdef CONFIG_PM_SLEEP
9909 static int ufshcd_wl_suspend(struct device
*dev
)
9911 struct scsi_device
*sdev
= to_scsi_device(dev
);
9912 struct ufs_hba
*hba
;
9914 ktime_t start
= ktime_get();
9916 hba
= shost_priv(sdev
->host
);
9917 down(&hba
->host_sem
);
9918 hba
->system_suspending
= true;
9920 if (pm_runtime_suspended(dev
))
9923 ret
= __ufshcd_wl_suspend(hba
, UFS_SYSTEM_PM
);
9925 dev_err(&sdev
->sdev_gendev
, "%s failed: %d\n", __func__
, ret
);
9931 hba
->is_sys_suspended
= true;
9932 trace_ufshcd_wl_suspend(dev_name(dev
), ret
,
9933 ktime_to_us(ktime_sub(ktime_get(), start
)),
9934 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
9939 static int ufshcd_wl_resume(struct device
*dev
)
9941 struct scsi_device
*sdev
= to_scsi_device(dev
);
9942 struct ufs_hba
*hba
;
9944 ktime_t start
= ktime_get();
9946 hba
= shost_priv(sdev
->host
);
9948 if (pm_runtime_suspended(dev
))
9951 ret
= __ufshcd_wl_resume(hba
, UFS_SYSTEM_PM
);
9953 dev_err(&sdev
->sdev_gendev
, "%s failed: %d\n", __func__
, ret
);
9955 trace_ufshcd_wl_resume(dev_name(dev
), ret
,
9956 ktime_to_us(ktime_sub(ktime_get(), start
)),
9957 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
9959 hba
->is_sys_suspended
= false;
9960 hba
->system_suspending
= false;
9967 * ufshcd_suspend - helper function for suspend operations
9968 * @hba: per adapter instance
9970 * This function will put disable irqs, turn off clocks
9971 * and set vreg and hba-vreg in lpm mode.
9973 * Return: 0 upon success; < 0 upon failure.
9975 static int ufshcd_suspend(struct ufs_hba
*hba
)
9979 if (!hba
->is_powered
)
9982 * Disable the host irq as host controller as there won't be any
9983 * host controller transaction expected till resume.
9985 ufshcd_disable_irq(hba
);
9986 ret
= ufshcd_setup_clocks(hba
, false);
9988 ufshcd_enable_irq(hba
);
9991 if (ufshcd_is_clkgating_allowed(hba
)) {
9992 hba
->clk_gating
.state
= CLKS_OFF
;
9993 trace_ufshcd_clk_gating(dev_name(hba
->dev
),
9994 hba
->clk_gating
.state
);
9997 ufshcd_vreg_set_lpm(hba
);
9998 /* Put the host controller in low power mode if possible */
9999 ufshcd_hba_vreg_set_lpm(hba
);
10000 ufshcd_pm_qos_update(hba
, false);
10006 * ufshcd_resume - helper function for resume operations
10007 * @hba: per adapter instance
10009 * This function basically turns on the regulators, clocks and
10012 * Return: 0 for success and non-zero for failure.
10014 static int ufshcd_resume(struct ufs_hba
*hba
)
10018 if (!hba
->is_powered
)
10021 ufshcd_hba_vreg_set_hpm(hba
);
10022 ret
= ufshcd_vreg_set_hpm(hba
);
10026 /* Make sure clocks are enabled before accessing controller */
10027 ret
= ufshcd_setup_clocks(hba
, true);
10031 /* enable the host irq as host controller would be active soon */
10032 ufshcd_enable_irq(hba
);
10037 ufshcd_vreg_set_lpm(hba
);
10040 ufshcd_update_evt_hist(hba
, UFS_EVT_RESUME_ERR
, (u32
)ret
);
10043 #endif /* CONFIG_PM */
10045 #ifdef CONFIG_PM_SLEEP
10047 * ufshcd_system_suspend - system suspend callback
10048 * @dev: Device associated with the UFS controller.
10050 * Executed before putting the system into a sleep state in which the contents
10051 * of main memory are preserved.
10053 * Return: 0 for success and non-zero for failure.
10055 int ufshcd_system_suspend(struct device
*dev
)
10057 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
10059 ktime_t start
= ktime_get();
10061 if (pm_runtime_suspended(hba
->dev
))
10064 ret
= ufshcd_suspend(hba
);
10066 trace_ufshcd_system_suspend(dev_name(hba
->dev
), ret
,
10067 ktime_to_us(ktime_sub(ktime_get(), start
)),
10068 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
10071 EXPORT_SYMBOL(ufshcd_system_suspend
);
10074 * ufshcd_system_resume - system resume callback
10075 * @dev: Device associated with the UFS controller.
10077 * Executed after waking the system up from a sleep state in which the contents
10078 * of main memory were preserved.
10080 * Return: 0 for success and non-zero for failure.
10082 int ufshcd_system_resume(struct device
*dev
)
10084 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
10085 ktime_t start
= ktime_get();
10088 if (pm_runtime_suspended(hba
->dev
))
10091 ret
= ufshcd_resume(hba
);
10094 trace_ufshcd_system_resume(dev_name(hba
->dev
), ret
,
10095 ktime_to_us(ktime_sub(ktime_get(), start
)),
10096 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
10100 EXPORT_SYMBOL(ufshcd_system_resume
);
10101 #endif /* CONFIG_PM_SLEEP */
10105 * ufshcd_runtime_suspend - runtime suspend callback
10106 * @dev: Device associated with the UFS controller.
10108 * Check the description of ufshcd_suspend() function for more details.
10110 * Return: 0 for success and non-zero for failure.
10112 int ufshcd_runtime_suspend(struct device
*dev
)
10114 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
10116 ktime_t start
= ktime_get();
10118 ret
= ufshcd_suspend(hba
);
10120 trace_ufshcd_runtime_suspend(dev_name(hba
->dev
), ret
,
10121 ktime_to_us(ktime_sub(ktime_get(), start
)),
10122 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
10125 EXPORT_SYMBOL(ufshcd_runtime_suspend
);
10128 * ufshcd_runtime_resume - runtime resume routine
10129 * @dev: Device associated with the UFS controller.
10131 * This function basically brings controller
10132 * to active state. Following operations are done in this function:
10134 * 1. Turn on all the controller related clocks
10135 * 2. Turn ON VCC rail
10137 * Return: 0 upon success; < 0 upon failure.
10139 int ufshcd_runtime_resume(struct device
*dev
)
10141 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
10143 ktime_t start
= ktime_get();
10145 ret
= ufshcd_resume(hba
);
10147 trace_ufshcd_runtime_resume(dev_name(hba
->dev
), ret
,
10148 ktime_to_us(ktime_sub(ktime_get(), start
)),
10149 hba
->curr_dev_pwr_mode
, hba
->uic_link_state
);
10152 EXPORT_SYMBOL(ufshcd_runtime_resume
);
10153 #endif /* CONFIG_PM */
10155 static void ufshcd_wl_shutdown(struct device
*dev
)
10157 struct scsi_device
*sdev
= to_scsi_device(dev
);
10158 struct ufs_hba
*hba
= shost_priv(sdev
->host
);
10160 down(&hba
->host_sem
);
10161 hba
->shutting_down
= true;
10162 up(&hba
->host_sem
);
10164 /* Turn on everything while shutting down */
10165 ufshcd_rpm_get_sync(hba
);
10166 scsi_device_quiesce(sdev
);
10167 shost_for_each_device(sdev
, hba
->host
) {
10168 if (sdev
== hba
->ufs_device_wlun
)
10170 mutex_lock(&sdev
->state_mutex
);
10171 scsi_device_set_state(sdev
, SDEV_OFFLINE
);
10172 mutex_unlock(&sdev
->state_mutex
);
10174 __ufshcd_wl_suspend(hba
, UFS_SHUTDOWN_PM
);
10177 * Next, turn off the UFS controller and the UFS regulators. Disable
10180 if (ufshcd_is_ufs_dev_poweroff(hba
) && ufshcd_is_link_off(hba
))
10181 ufshcd_suspend(hba
);
10183 hba
->is_powered
= false;
10187 * ufshcd_remove - de-allocate SCSI host and host memory space
10188 * data structure memory
10189 * @hba: per adapter instance
10191 void ufshcd_remove(struct ufs_hba
*hba
)
10193 if (hba
->ufs_device_wlun
)
10194 ufshcd_rpm_get_sync(hba
);
10195 ufs_hwmon_remove(hba
);
10196 ufs_bsg_remove(hba
);
10197 ufs_sysfs_remove_nodes(hba
->dev
);
10198 blk_mq_destroy_queue(hba
->tmf_queue
);
10199 blk_put_queue(hba
->tmf_queue
);
10200 blk_mq_free_tag_set(&hba
->tmf_tag_set
);
10201 if (hba
->scsi_host_added
)
10202 scsi_remove_host(hba
->host
);
10203 /* disable interrupts */
10204 ufshcd_disable_intr(hba
, hba
->intr_mask
);
10205 ufshcd_hba_stop(hba
);
10206 ufshcd_hba_exit(hba
);
10208 EXPORT_SYMBOL_GPL(ufshcd_remove
);
10210 #ifdef CONFIG_PM_SLEEP
10211 int ufshcd_system_freeze(struct device
*dev
)
10214 return ufshcd_system_suspend(dev
);
10217 EXPORT_SYMBOL_GPL(ufshcd_system_freeze
);
10219 int ufshcd_system_restore(struct device
*dev
)
10222 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
10225 ret
= ufshcd_system_resume(dev
);
10229 /* Configure UTRL and UTMRL base address registers */
10230 ufshcd_writel(hba
, lower_32_bits(hba
->utrdl_dma_addr
),
10231 REG_UTP_TRANSFER_REQ_LIST_BASE_L
);
10232 ufshcd_writel(hba
, upper_32_bits(hba
->utrdl_dma_addr
),
10233 REG_UTP_TRANSFER_REQ_LIST_BASE_H
);
10234 ufshcd_writel(hba
, lower_32_bits(hba
->utmrdl_dma_addr
),
10235 REG_UTP_TASK_REQ_LIST_BASE_L
);
10236 ufshcd_writel(hba
, upper_32_bits(hba
->utmrdl_dma_addr
),
10237 REG_UTP_TASK_REQ_LIST_BASE_H
);
10239 * Make sure that UTRL and UTMRL base address registers
10240 * are updated with the latest queue addresses. Only after
10241 * updating these addresses, we can queue the new commands.
10243 ufshcd_readl(hba
, REG_UTP_TASK_REQ_LIST_BASE_H
);
10248 EXPORT_SYMBOL_GPL(ufshcd_system_restore
);
10250 int ufshcd_system_thaw(struct device
*dev
)
10252 return ufshcd_system_resume(dev
);
10254 EXPORT_SYMBOL_GPL(ufshcd_system_thaw
);
10255 #endif /* CONFIG_PM_SLEEP */
10258 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
10259 * @hba: pointer to Host Bus Adapter (HBA)
10261 void ufshcd_dealloc_host(struct ufs_hba
*hba
)
10263 scsi_host_put(hba
->host
);
10265 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host
);
10268 * ufshcd_set_dma_mask - Set dma mask based on the controller
10269 * addressing capability
10270 * @hba: per adapter instance
10272 * Return: 0 for success, non-zero for failure.
10274 static int ufshcd_set_dma_mask(struct ufs_hba
*hba
)
10276 if (hba
->vops
&& hba
->vops
->set_dma_mask
)
10277 return hba
->vops
->set_dma_mask(hba
);
10278 if (hba
->capabilities
& MASK_64_ADDRESSING_SUPPORT
) {
10279 if (!dma_set_mask_and_coherent(hba
->dev
, DMA_BIT_MASK(64)))
10282 return dma_set_mask_and_coherent(hba
->dev
, DMA_BIT_MASK(32));
10286 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
10287 * @dev: pointer to device handle
10288 * @hba_handle: driver private handle
10290 * Return: 0 on success, non-zero value on failure.
10292 int ufshcd_alloc_host(struct device
*dev
, struct ufs_hba
**hba_handle
)
10294 struct Scsi_Host
*host
;
10295 struct ufs_hba
*hba
;
10300 "Invalid memory reference for dev is NULL\n");
10305 host
= scsi_host_alloc(&ufshcd_driver_template
,
10306 sizeof(struct ufs_hba
));
10308 dev_err(dev
, "scsi_host_alloc failed\n");
10312 host
->nr_maps
= HCTX_TYPE_POLL
+ 1;
10313 hba
= shost_priv(host
);
10316 hba
->dev_ref_clk_freq
= REF_CLK_FREQ_INVAL
;
10317 hba
->nop_out_timeout
= NOP_OUT_TIMEOUT
;
10318 ufshcd_set_sg_entry_size(hba
, sizeof(struct ufshcd_sg_entry
));
10319 INIT_LIST_HEAD(&hba
->clk_list_head
);
10320 spin_lock_init(&hba
->outstanding_lock
);
10327 EXPORT_SYMBOL(ufshcd_alloc_host
);
10329 /* This function exists because blk_mq_alloc_tag_set() requires this. */
10330 static blk_status_t
ufshcd_queue_tmf(struct blk_mq_hw_ctx
*hctx
,
10331 const struct blk_mq_queue_data
*qd
)
10333 WARN_ON_ONCE(true);
10334 return BLK_STS_NOTSUPP
;
10337 static const struct blk_mq_ops ufshcd_tmf_ops
= {
10338 .queue_rq
= ufshcd_queue_tmf
,
10341 static int ufshcd_add_scsi_host(struct ufs_hba
*hba
)
10345 if (is_mcq_supported(hba
)) {
10346 ufshcd_mcq_enable(hba
);
10347 err
= ufshcd_alloc_mcq(hba
);
10349 ufshcd_config_mcq(hba
);
10351 /* Continue with SDB mode */
10352 ufshcd_mcq_disable(hba
);
10353 use_mcq_mode
= false;
10354 dev_err(hba
->dev
, "MCQ mode is disabled, err=%d\n",
10358 if (!is_mcq_supported(hba
) && !hba
->lsdb_sup
) {
10360 "%s: failed to initialize (legacy doorbell mode not supported)\n",
10365 err
= scsi_add_host(hba
->host
, hba
->dev
);
10367 dev_err(hba
->dev
, "scsi_add_host failed\n");
10370 hba
->scsi_host_added
= true;
10372 hba
->tmf_tag_set
= (struct blk_mq_tag_set
) {
10374 .queue_depth
= hba
->nutmrs
,
10375 .ops
= &ufshcd_tmf_ops
,
10376 .flags
= BLK_MQ_F_NO_SCHED
,
10378 err
= blk_mq_alloc_tag_set(&hba
->tmf_tag_set
);
10380 goto remove_scsi_host
;
10381 hba
->tmf_queue
= blk_mq_alloc_queue(&hba
->tmf_tag_set
, NULL
, NULL
);
10382 if (IS_ERR(hba
->tmf_queue
)) {
10383 err
= PTR_ERR(hba
->tmf_queue
);
10384 goto free_tmf_tag_set
;
10386 hba
->tmf_rqs
= devm_kcalloc(hba
->dev
, hba
->nutmrs
,
10387 sizeof(*hba
->tmf_rqs
), GFP_KERNEL
);
10388 if (!hba
->tmf_rqs
) {
10390 goto free_tmf_queue
;
10396 blk_mq_destroy_queue(hba
->tmf_queue
);
10397 blk_put_queue(hba
->tmf_queue
);
10400 blk_mq_free_tag_set(&hba
->tmf_tag_set
);
10403 if (hba
->scsi_host_added
)
10404 scsi_remove_host(hba
->host
);
10410 * ufshcd_init - Driver initialization routine
10411 * @hba: per-adapter instance
10412 * @mmio_base: base register address
10413 * @irq: Interrupt line of device
10415 * Return: 0 on success, non-zero value on failure.
10417 int ufshcd_init(struct ufs_hba
*hba
, void __iomem
*mmio_base
, unsigned int irq
)
10420 struct Scsi_Host
*host
= hba
->host
;
10421 struct device
*dev
= hba
->dev
;
10424 * dev_set_drvdata() must be called before any callbacks are registered
10425 * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
10428 dev_set_drvdata(dev
, hba
);
10432 "Invalid memory reference for mmio_base is NULL\n");
10437 hba
->mmio_base
= mmio_base
;
10439 hba
->vps
= &ufs_hba_vps
;
10441 err
= ufshcd_hba_init(hba
);
10445 /* Read capabilities registers */
10446 err
= ufshcd_hba_capabilities(hba
);
10450 /* Get UFS version supported by the controller */
10451 hba
->ufs_version
= ufshcd_get_ufs_version(hba
);
10453 /* Get Interrupt bit mask per version */
10454 hba
->intr_mask
= ufshcd_get_intr_mask(hba
);
10456 err
= ufshcd_set_dma_mask(hba
);
10458 dev_err(hba
->dev
, "set dma mask failed\n");
10462 /* Allocate memory for host memory space */
10463 err
= ufshcd_memory_alloc(hba
);
10465 dev_err(hba
->dev
, "Memory allocation failed\n");
10469 /* Configure LRB */
10470 ufshcd_host_memory_configure(hba
);
10472 host
->can_queue
= hba
->nutrs
- UFSHCD_NUM_RESERVED
;
10473 host
->cmd_per_lun
= hba
->nutrs
- UFSHCD_NUM_RESERVED
;
10474 host
->max_id
= UFSHCD_MAX_ID
;
10475 host
->max_lun
= UFS_MAX_LUNS
;
10476 host
->max_channel
= UFSHCD_MAX_CHANNEL
;
10477 host
->unique_id
= host
->host_no
;
10478 host
->max_cmd_len
= UFS_CDB_SIZE
;
10479 host
->queuecommand_may_block
= !!(hba
->caps
& UFSHCD_CAP_CLK_GATING
);
10481 /* Use default RPM delay if host not set */
10482 if (host
->rpm_autosuspend_delay
== 0)
10483 host
->rpm_autosuspend_delay
= RPM_AUTOSUSPEND_DELAY_MS
;
10485 hba
->max_pwr_info
.is_valid
= false;
10487 /* Initialize work queues */
10488 hba
->eh_wq
= alloc_ordered_workqueue("ufs_eh_wq_%d", WQ_MEM_RECLAIM
,
10489 hba
->host
->host_no
);
10491 dev_err(hba
->dev
, "%s: failed to create eh workqueue\n",
10496 INIT_WORK(&hba
->eh_work
, ufshcd_err_handler
);
10497 INIT_WORK(&hba
->eeh_work
, ufshcd_exception_event_handler
);
10499 sema_init(&hba
->host_sem
, 1);
10501 /* Initialize UIC command mutex */
10502 mutex_init(&hba
->uic_cmd_mutex
);
10504 /* Initialize mutex for device management commands */
10505 mutex_init(&hba
->dev_cmd
.lock
);
10507 /* Initialize mutex for exception event control */
10508 mutex_init(&hba
->ee_ctrl_mutex
);
10510 mutex_init(&hba
->wb_mutex
);
10511 init_rwsem(&hba
->clk_scaling_lock
);
10513 ufshcd_init_clk_gating(hba
);
10515 ufshcd_init_clk_scaling(hba
);
10518 * In order to avoid any spurious interrupt immediately after
10519 * registering UFS controller interrupt handler, clear any pending UFS
10520 * interrupt status and disable all the UFS interrupts.
10522 ufshcd_writel(hba
, ufshcd_readl(hba
, REG_INTERRUPT_STATUS
),
10523 REG_INTERRUPT_STATUS
);
10524 ufshcd_writel(hba
, 0, REG_INTERRUPT_ENABLE
);
10526 * Make sure that UFS interrupts are disabled and any pending interrupt
10527 * status is cleared before registering UFS interrupt handler.
10529 ufshcd_readl(hba
, REG_INTERRUPT_ENABLE
);
10531 /* IRQ registration */
10532 err
= devm_request_irq(dev
, irq
, ufshcd_intr
, IRQF_SHARED
, UFSHCD
, hba
);
10534 dev_err(hba
->dev
, "request irq failed\n");
10537 hba
->is_irq_enabled
= true;
10540 /* Reset the attached device */
10541 ufshcd_device_reset(hba
);
10543 ufshcd_init_crypto(hba
);
10545 /* Host controller enable */
10546 err
= ufshcd_hba_enable(hba
);
10548 dev_err(hba
->dev
, "Host controller enable failed\n");
10549 ufshcd_print_evt_hist(hba
);
10550 ufshcd_print_host_state(hba
);
10555 * Set the default power management level for runtime and system PM.
10556 * Default power saving mode is to keep UFS link in Hibern8 state
10557 * and UFS device in sleep state.
10559 hba
->rpm_lvl
= ufs_get_desired_pm_lvl_for_dev_link_state(
10560 UFS_SLEEP_PWR_MODE
,
10561 UIC_LINK_HIBERN8_STATE
);
10562 hba
->spm_lvl
= ufs_get_desired_pm_lvl_for_dev_link_state(
10563 UFS_SLEEP_PWR_MODE
,
10564 UIC_LINK_HIBERN8_STATE
);
10566 INIT_DELAYED_WORK(&hba
->rpm_dev_flush_recheck_work
, ufshcd_rpm_dev_flush_recheck_work
);
10567 INIT_DELAYED_WORK(&hba
->ufs_rtc_update_work
, ufshcd_rtc_work
);
10569 /* Set the default auto-hiberate idle timer value to 150 ms */
10570 if (ufshcd_is_auto_hibern8_supported(hba
) && !hba
->ahit
) {
10571 hba
->ahit
= FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK
, 150) |
10572 FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK
, 3);
10575 /* Hold auto suspend until async scan completes */
10576 pm_runtime_get_sync(dev
);
10579 * We are assuming that device wasn't put in sleep/power-down
10580 * state exclusively during the boot stage before kernel.
10581 * This assumption helps avoid doing link startup twice during
10582 * ufshcd_probe_hba().
10584 ufshcd_set_ufs_dev_active(hba
);
10586 /* Initialize hba, detect and initialize UFS device */
10587 ktime_t probe_start
= ktime_get();
10589 hba
->ufshcd_state
= UFSHCD_STATE_RESET
;
10591 err
= ufshcd_link_startup(hba
);
10595 if (hba
->quirks
& UFSHCD_QUIRK_SKIP_PH_CONFIGURATION
)
10598 /* Debug counters initialization */
10599 ufshcd_clear_dbg_ufs_stats(hba
);
10601 /* UniPro link is active now */
10602 ufshcd_set_link_active(hba
);
10604 /* Verify device initialization by sending NOP OUT UPIU */
10605 err
= ufshcd_verify_dev_init(hba
);
10609 /* Initiate UFS initialization, and waiting until completion */
10610 err
= ufshcd_complete_dev_init(hba
);
10614 err
= ufshcd_device_params_init(hba
);
10618 err
= ufshcd_post_device_init(hba
);
10621 ufshcd_process_probe_result(hba
, probe_start
, err
);
10625 err
= ufshcd_add_scsi_host(hba
);
10629 async_schedule(ufshcd_async_scan
, hba
);
10630 ufs_sysfs_add_nodes(hba
->dev
);
10632 device_enable_async_suspend(dev
);
10633 ufshcd_pm_qos_init(hba
);
10637 hba
->is_irq_enabled
= false;
10638 ufshcd_hba_exit(hba
);
10642 EXPORT_SYMBOL_GPL(ufshcd_init
);
10644 void ufshcd_resume_complete(struct device
*dev
)
10646 struct ufs_hba
*hba
= dev_get_drvdata(dev
);
10648 if (hba
->complete_put
) {
10649 ufshcd_rpm_put(hba
);
10650 hba
->complete_put
= false;
10653 EXPORT_SYMBOL_GPL(ufshcd_resume_complete
);
static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
{
	struct device *dev = &hba->ufs_device_wlun->sdev_gendev;
	enum ufs_dev_pwr_mode dev_pwr_mode;
	enum uic_link_state link_state;
	unsigned long flags;
	bool res;

	spin_lock_irqsave(&dev->power.lock, flags);
	dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
	link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
	res = pm_runtime_suspended(dev) &&
	      hba->curr_dev_pwr_mode == dev_pwr_mode &&
	      hba->uic_link_state == link_state &&
	      !hba->dev_info.b_rpm_dev_flush_capable;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	return res;
}
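
/*
 * __ufshcd_suspend_prepare - PM bookkeeping run from the suspend_prepare
 * phase.
 * @dev: UFS host controller device.
 * @rpm_ok_for_spm: when true, a WLUN that is already runtime suspended in the
 *	state system suspend would use is left suspended; otherwise it is
 *	always runtime resumed first.
 *
 * Takes a runtime-PM reference to block runtime suspend until
 * ufshcd_resume_complete() drops it. Returns 0 on success or a negative error
 * code if the runtime resume fails.
 */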
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	int ret;

	/*
	 * SCSI assumes that runtime-PM and system-PM for SCSI drivers are the
	 * same, and it doesn't wake up the device for system suspend if it is
	 * runtime suspended. UFS doesn't follow that convention; see
	 * ufshcd_resume_complete().
	 */
	if (hba->ufs_device_wlun) {
		/* Prevent runtime suspend */
		ufshcd_rpm_get_noresume(hba);
		/*
		 * Check if already runtime suspended in same state as system
		 * suspend would be.
		 */
		if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
			/* RPM state is not ok for SPM, so runtime resume */
			ret = ufshcd_rpm_resume(hba);
			if (ret < 0 && ret != -EACCES) {
				ufshcd_rpm_put(hba);
				return ret;
			}
		}
		hba->complete_put = true;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);

int ufshcd_suspend_prepare(struct device *dev)
{
	return __ufshcd_suspend_prepare(dev, true);
}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
static int ufshcd_wl_poweroff(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct ufs_hba *hba = shost_priv(sdev->host);

	__ufshcd_wl_suspend(hba, UFS_SHUTDOWN_PM);

	return 0;
}
#endif
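
/*
 * The SCSI driver registered below binds only to the UFS device well-known
 * LUN; all other LUNs are rejected in probe. The WLUN gets a zero autosuspend
 * delay and runtime PM is allowed right away, since every regular LUN is a PM
 * consumer of this WLUN.
 */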
static int ufshcd_wl_probe(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	if (!is_device_wlun(sdev))
		return -ENODEV;

	blk_pm_runtime_init(sdev->request_queue, dev);
	pm_runtime_set_autosuspend_delay(dev, 0);
	pm_runtime_allow(dev);

	return 0;
}

static int ufshcd_wl_remove(struct device *dev)
{
	pm_runtime_forbid(dev);
	return 0;
}
static const struct dev_pm_ops ufshcd_wl_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend = ufshcd_wl_suspend,
	.resume = ufshcd_wl_resume,
	.freeze = ufshcd_wl_suspend,
	.thaw = ufshcd_wl_resume,
	.poweroff = ufshcd_wl_poweroff,
	.restore = ufshcd_wl_resume,
#endif
	SET_RUNTIME_PM_OPS(ufshcd_wl_runtime_suspend,
			   ufshcd_wl_runtime_resume, NULL)
};
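
/*
 * ufshcd_check_header_layout - compile-time verification that the C bitfield
 * layout of struct request_desc_header and struct utp_upiu_header matches the
 * byte layout the rest of the driver assumes. For example, the byte-3 check
 * below packs command_type = 5, data_direction = 3 and interrupt = 1 into
 * (5 << 4) | (3 << 1) | 1 = 0x87.
 */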
static void ufshcd_check_header_layout(void)
{
	/*
	 * gcc compilers before version 10 cannot do constant-folding for
	 * sub-byte bitfields. Hence skip the layout checks for gcc 9 and
	 * earlier.
	 */
	if (IS_ENABLED(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 100000)
		return;

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.cci = 3})[0] != 3);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.ehs_length = 2})[1] != 2);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.enable_crypto = 1})[2]
		     != 0x80);

	BUILD_BUG_ON((((u8 *)&(struct request_desc_header){
					.command_type = 5,
					.data_direction = 3,
					.interrupt = 1,
				})[3]) != ((5 << 4) | (3 << 1) | 1));

	BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
				.dunl = cpu_to_le32(0xdeadbeef)})[1] !=
		cpu_to_le32(0xdeadbeef));

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.ocs = 4})[8] != 4);

	BUILD_BUG_ON(((u8 *)&(struct request_desc_header){
				.cds = 5})[9] != 5);

	BUILD_BUG_ON(((__le32 *)&(struct request_desc_header){
				.dunu = cpu_to_le32(0xbadcafe)})[3] !=
		cpu_to_le32(0xbadcafe));

	BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
				.iid = 0xf })[4] != 0xf0);

	BUILD_BUG_ON(((u8 *)&(struct utp_upiu_header){
				.command_set_type = 0xf })[4] != 0xf);
}
/*
 * ufs_dev_wlun_template - describes the UFS device WLUN
 * ufs-device wlun - used to send PM commands
 * All LUNs are consumers of the ufs-device WLUN.
 *
 * Currently, no sd driver is present for WLUNs, hence no specific PM
 * operations are performed for them. Per the UFS design, SSU commands should
 * be sent to the ufs-device WLUN, so register a SCSI driver for UFS WLUNs
 * only.
 */
static struct scsi_driver ufs_dev_wlun_template = {
	.gendrv = {
		.name = "ufs_device_wlun",
		.probe = ufshcd_wl_probe,
		.remove = ufshcd_wl_remove,
		.pm = &ufshcd_wl_pm_ops,
		.shutdown = ufshcd_wl_shutdown,
	},
};
static int __init ufshcd_core_init(void)
{
	int ret;

	ufshcd_check_header_layout();

	ufs_debugfs_init();

	ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv);
	if (ret)
		ufs_debugfs_exit();
	return ret;
}

static void __exit ufshcd_core_exit(void)
{
	ufs_debugfs_exit();
	scsi_unregister_driver(&ufs_dev_wlun_template.gendrv);
}

module_init(ufshcd_core_init);
module_exit(ufshcd_core_exit);
10846 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
10847 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
10848 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
10849 MODULE_SOFTDEP("pre: governor_simpleondemand");
10850 MODULE_LICENSE("GPL");