/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 *
 * The Linux Foundation chooses to take subject only to the GPLv2
 * license terms, and distributes only under these terms.
 */
#include <linux/async.h>
#include <linux/devfreq.h>
#include <linux/nls.h>
#include <linux/of.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"

#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
#define UFSHCD_REQ_SENSE_SIZE	18

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 3
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* maximum number of retries for a general UIC command */
#define UFS_UIC_COMMAND_RETRIES 3

/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3

/* Maximum retries for Hibern8 enter */
#define UIC_HIBERN8_ENTER_RETRIES 3

/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02
#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
	({                                                              \
		int _ret;                                               \
		if (_on)                                                \
			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
		else                                                    \
			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
		_ret;                                                   \
	})

#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
	UFSHCD_STATE_EH_SCHEDULED,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
	UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};

#define ufshcd_set_eh_in_progress(h) \
	((h)->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	((h)->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

#define ufshcd_set_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
#define ufshcd_set_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
#define ufshcd_set_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
#define ufshcd_is_ufs_dev_active(h) \
	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
#define ufshcd_is_ufs_dev_sleep(h) \
	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
#define ufshcd_is_ufs_dev_poweroff(h) \
	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
};
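
/*
 * Note: the table above is indexed directly by enum ufs_pm_level, so the
 * lookup helpers below are plain array accesses; keep the table and the
 * enum in sync.
 */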
static inline enum ufs_dev_pwr_mode
ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].dev_state;
}

static inline enum uic_link_state
ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
{
	return ufs_pm_lvl_states[lvl].link_state;
}

static inline enum ufs_pm_level
ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
					enum uic_link_state link_state)
{
	enum ufs_pm_level lvl;

	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
			(ufs_pm_lvl_states[lvl].link_state == link_state))
			return lvl;
	}

	/* if no match found, return the level 0 */
	return UFS_PM_LVL_0;
}
static struct ufs_dev_fix ufs_fixups[] = {
	/* UFS cards deviations table */
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_NO_FASTAUTO),
	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
		UFS_DEVICE_QUIRK_PA_TACTIVATE),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),

	END_FIX
};
static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
				 bool skip_ref_clk);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
		struct ufs_pa_layer_attr *desired_pwr_mode);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
			     struct ufs_pa_layer_attr *pwr_mode);
static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
	return tag >= 0 && tag < hba->nutrs;
}

static inline int ufshcd_enable_irq(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba->is_irq_enabled) {
		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
				hba);
		if (ret)
			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
				__func__, ret);
		hba->is_irq_enabled = true;
	}

	return ret;
}

static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
	if (hba->is_irq_enabled) {
		free_irq(hba->irq, hba);
		hba->is_irq_enabled = false;
	}
}
/*
 * Replace a non-printable or non-ASCII character with a space; callers
 * iterate over the string one character at a time.
 */
static inline void ufshcd_remove_non_printable(char *val)
{
	if (!val)
		return;

	if (*val < 0x20 || *val > 0x7e)
		*val = ' ';
}
static void ufshcd_add_command_trace(struct ufs_hba *hba,
		unsigned int tag, const char *str)
{
	sector_t lba = -1;
	u8 opcode = 0;
	u32 intr, doorbell;
	struct ufshcd_lrb *lrbp;
	int transfer_len = -1;

	if (!trace_ufshcd_command_enabled())
		return;

	lrbp = &hba->lrb[tag];

	if (lrbp->cmd) { /* data phase exists */
		opcode = (u8)(*lrbp->cmd->cmnd);
		if ((opcode == READ_10) || (opcode == WRITE_10)) {
			/*
			 * Currently we only fully trace read(10) and write(10)
			 * commands
			 */
			if (lrbp->cmd->request && lrbp->cmd->request->bio)
				lba =
				  lrbp->cmd->request->bio->bi_iter.bi_sector;
			transfer_len = be32_to_cpu(
				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
		}
	}

	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	trace_ufshcd_command(dev_name(hba->dev), str, tag,
				doorbell, transfer_len, intr, lba, opcode);
}
static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
				clki->max_freq)
			dev_err(hba->dev, "clk: %s, rate: %u\n",
					clki->name, clki->curr_freq);
	}
}
static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
{
	int i;

	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;

		if (err_hist->reg[p] == 0)
			continue;
		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i,
			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
	}
}
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
	/*
	 * hex_dump reads its data without the readl macro. This might
	 * cause inconsistency issues on some platform, as the printed
	 * values may be from cache and not the most recent value.
	 * To know whether you are looking at an un-cached version verify
	 * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
	 * during platform/pci probe function.
	 */
	ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
		hba->ufs_version, hba->capabilities);
	dev_err(hba->dev,
		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
	dev_err(hba->dev,
		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
		hba->ufs_stats.hibern8_exit_cnt);

	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");

	ufshcd_print_clk_freqs(hba);

	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}
static
void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
{
	struct ufshcd_lrb *lrbp;
	int prdt_length;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutrs) {
		lrbp = &hba->lrb[tag];

		dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
				tag, ktime_to_us(lrbp->issue_time_stamp));
		dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
				tag, ktime_to_us(lrbp->compl_time_stamp));
		dev_err(hba->dev,
			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
			tag, (u64)lrbp->utrd_dma_addr);

		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
				sizeof(struct utp_transfer_req_desc));
		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_req_dma_addr);
		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx\n", tag,
			(u64)lrbp->ucd_rsp_dma_addr);
		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
				sizeof(struct utp_upiu_rsp));

		prdt_length = le16_to_cpu(
			lrbp->utr_descriptor_ptr->prd_table_length);
		dev_err(hba->dev,
			"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
			tag, prdt_length,
			(u64)lrbp->ucd_prdt_dma_addr);

		if (pr_prdt)
			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
				sizeof(struct ufshcd_sg_entry) * prdt_length);
	}
}
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
	struct utp_task_req_desc *tmrdp;
	int tag;

	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
		tmrdp = &hba->utmrdl_base_addr[tag];
		dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
				sizeof(struct request_desc_header));
		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
				tag);
		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
				sizeof(struct utp_upiu_req));
		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
				tag);
		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
				sizeof(struct utp_task_req_desc));
	}
}
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
		hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks);
	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
		hba->saved_err, hba->saved_uic_err);
	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
		hba->curr_dev_pwr_mode, hba->uic_link_state);
	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
		hba->pm_op_in_progress, hba->is_sys_suspended);
	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
		hba->auto_bkops_enabled, hba->host->host_self_blocked);
	dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
		hba->eh_flags, hba->req_abort_count);
	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
		hba->capabilities, hba->caps);
	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
		hba->dev_quirks);
}
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 * @hba: per-adapter instance
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		 __func__,
		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		 names[hba->pwr_info.pwr_rx],
		 names[hba->pwr_info.pwr_tx],
		 hba->pwr_info.hs_rate);
}
/**
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 * @can_sleep - perform sleep or just spin
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (can_sleep)
			usleep_range(interval_us, interval_us + 50);
		else
			udelay(interval_us);
		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
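
/*
 * Illustrative use (values are an example, not part of the function's
 * contract): wait for a transfer request doorbell bit to clear, polling
 * every 100 us for up to 1000 ms, sleeping between reads:
 *
 *	err = ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
 *				       1 << tag, ~(1 << tag), 100, 1000, true);
 *
 * Passing ~(1 << tag) as @val makes the masked wait condition zero, i.e.
 * "wait until the bit is cleared".
 */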
/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	u32 intr_mask = 0;

	switch (hba->ufs_version) {
	case UFSHCI_VERSION_10:
		intr_mask = INTERRUPT_MASK_ALL_VER_10;
		break;
	case UFSHCI_VERSION_11:
	case UFSHCI_VERSION_20:
		intr_mask = INTERRUPT_MASK_ALL_VER_11;
		break;
	case UFSHCI_VERSION_21:
	default:
		intr_mask = INTERRUPT_MASK_ALL_VER_21;
		break;
	}

	return intr_mask;
}
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION)
		return ufshcd_vops_get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}
/**
 * ufshcd_is_device_present - Check if any device connected to
 *			      the host controller
 * @hba: pointer to adapter instance
 *
 * Returns true if device present, false if no device detected
 */
static inline bool ufshcd_is_device_present(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
						DEVICE_PRESENT) ? true : false;
}
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrb: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}
/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns 0 if free slot is not available, else return 1 with tag value
 * in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;
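
	/*
	 * The bitmap search and the claim below are two separate steps, so
	 * another context can grab the slot in between; retry until
	 * test_and_set_bit_lock() actually wins a free slot.
	 */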
	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}
/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
 * @hba: per adapter instance
 * @tag: position of the bit to be cleared
 */
static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
{
	__clear_bit(tag, &hba->outstanding_reqs);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns integer, 0 on Success and positive value if failed
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY);
}
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * This function gets UIC command argument3
 * Returns 0 on success, non zero value on error
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}
/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 * @hba: per adapter instance
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, they indicate to the
 * host controller that it can process requests.
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns false if controller is active, true otherwise
 */
static inline bool ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE)
		? false : true;
}
static const char *ufschd_uic_link_state_to_string(
			enum uic_link_state state)
{
	switch (state) {
	case UIC_LINK_OFF_STATE:	return "OFF";
	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
	default:			return "UNKNOWN";
	}
}

static const char *ufschd_ufs_dev_pwr_mode_to_string(
			enum ufs_dev_pwr_mode state)
{
	switch (state) {
	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
	default:			return "UNKNOWN";
	}
}
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
{
	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
	if ((hba->ufs_version == UFSHCI_VERSION_10) ||
	    (hba->ufs_version == UFSHCI_VERSION_11))
		return UFS_UNIPRO_VER_1_41;
	else
		return UFS_UNIPRO_VER_1_6;
}
EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);

static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
{
	/*
	 * If both host and device support UniPro ver1.6 or later, PA layer
	 * parameters tuning happens during link startup itself.
	 *
	 * We can manually tune PA layer parameters if either host or device
	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
	 * logic simple, we will only do manual tuning if local unipro version
	 * doesn't support ver1.6 or later.
	 */
	if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
		return true;
	else
		return false;
}
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;
	ktime_t start = ktime_get();
	bool clk_state_changed = false;

	if (list_empty(head))
		goto out;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled up", clki->name,
						clki->curr_freq,
						clki->max_freq);

				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;

				clk_state_changed = true;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				trace_ufshcd_clk_scaling(dev_name(hba->dev),
						"scaled down", clki->name,
						clki->curr_freq,
						clki->min_freq);
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	if (clk_state_changed)
		trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
			(scale_up ? "up" : "down"),
			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
	return ret;
}
/**
 * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
 * @hba: per adapter instance
 * @scale_up: True if scaling up and false if scaling down
 *
 * Returns true if scaling is required, false otherwise.
 */
static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
					       bool scale_up)
{
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (list_empty(head))
		return false;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				return true;
			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				return true;
			}
		}
	}

	return false;
}
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
					u64 wait_timeout_us)
{
	unsigned long flags;
	int ret = 0;
	u32 tm_doorbell;
	u32 tr_doorbell;
	bool timeout = false, do_last_check = false;
	ktime_t start;

	ufshcd_hold(hba, false);
	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * Wait for all the outstanding tasks/transfer requests.
	 * Verify by checking the doorbell registers are clear.
	 */
	start = ktime_get();
	do {
		if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
			ret = -EBUSY;
			goto out;
		}

		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
		if (!tm_doorbell && !tr_doorbell) {
			timeout = false;
			break;
		} else if (do_last_check) {
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		schedule();
		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
		    wait_timeout_us) {
			timeout = true;
			/*
			 * We might have scheduled out for long time so make
			 * sure to check if doorbells are cleared by this time
			 * or not.
			 */
			do_last_check = true;
		}
		spin_lock_irqsave(hba->host->host_lock, flags);
	} while (tm_doorbell || tr_doorbell);

	if (timeout) {
		dev_err(hba->dev,
			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
			__func__, tm_doorbell, tr_doorbell);
		ret = -EBUSY;
	}
out:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_release(hba);
	return ret;
}
/**
 * ufshcd_scale_gear - scale up/down UFS gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up gear and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
{
	#define UFS_MIN_GEAR_TO_SCALE_DOWN	UFS_HS_G1
	int ret = 0;
	struct ufs_pa_layer_attr new_pwr_info;

	if (scale_up) {
		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
		       sizeof(struct ufs_pa_layer_attr));
	} else {
		memcpy(&new_pwr_info, &hba->pwr_info,
		       sizeof(struct ufs_pa_layer_attr));

		if (hba->pwr_info.gear_tx > UFS_MIN_GEAR_TO_SCALE_DOWN
		    || hba->pwr_info.gear_rx > UFS_MIN_GEAR_TO_SCALE_DOWN) {
			/* save the current power mode */
			memcpy(&hba->clk_scaling.saved_pwr_info.info,
				&hba->pwr_info,
				sizeof(struct ufs_pa_layer_attr));

			/* scale down gear */
			new_pwr_info.gear_tx = UFS_MIN_GEAR_TO_SCALE_DOWN;
			new_pwr_info.gear_rx = UFS_MIN_GEAR_TO_SCALE_DOWN;
		}
	}

	/* check if the power mode needs to be changed or not */
	ret = ufshcd_change_power_mode(hba, &new_pwr_info);

	if (ret)
		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
			__func__, ret,
			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
			new_pwr_info.gear_tx, new_pwr_info.gear_rx);

	return ret;
}
static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
{
	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
	int ret = 0;
	/*
	 * make sure that there are no outstanding requests when
	 * clock scaling is in progress
	 */
	scsi_block_requests(hba->host);
	down_write(&hba->clk_scaling_lock);
	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
		ret = -EBUSY;
		up_write(&hba->clk_scaling_lock);
		scsi_unblock_requests(hba->host);
	}

	return ret;
}

static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
{
	up_write(&hba->clk_scaling_lock);
	scsi_unblock_requests(hba->host);
}
/**
 * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
 * @hba: per adapter instance
 * @scale_up: True for scaling up and false for scaling down
 *
 * Returns 0 for success,
 * Returns -EBUSY if scaling can't happen at this time
 * Returns non-zero for any other errors
 */
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;

	/* let's not get into low power until clock scaling is completed */
	ufshcd_hold(hba, false);

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		return ret;

	/* scale down the gear before scaling down clocks */
	if (!scale_up) {
		ret = ufshcd_scale_gear(hba, false);
		if (ret)
			goto out;
	}

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret) {
		if (!scale_up)
			ufshcd_scale_gear(hba, true);
		goto out;
	}

	/* scale up the gear after scaling up clocks */
	if (scale_up) {
		ret = ufshcd_scale_gear(hba, true);
		if (ret) {
			ufshcd_scale_clks(hba, false);
			goto out;
		}
	}

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);

out:
	ufshcd_clock_scaling_unprepare(hba);
	ufshcd_release(hba);
	return ret;
}
static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.suspend_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = true;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
					   clk_scaling.resume_work);
	unsigned long irq_flags;

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (!hba->clk_scaling.is_suspended) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return;
	}
	hba->clk_scaling.is_suspended = false;
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	devfreq_resume_device(hba->devfreq);
}
static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int ret = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);
	ktime_t start;
	bool scale_up, sched_clk_scaling_suspend_work = false;
	unsigned long irq_flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;
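
	/*
	 * This devfreq profile exposes only two operating points: 0 means
	 * "scale down" and UINT_MAX means "scale up" (see the scale_up
	 * assignment below), so any frequency in between is invalid.
	 */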
	if ((*freq > 0) && (*freq < UINT_MAX)) {
		dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
		return -EINVAL;
	}

	spin_lock_irqsave(hba->host->host_lock, irq_flags);
	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		return 0;
	}

	if (!hba->clk_scaling.active_reqs)
		sched_clk_scaling_suspend_work = true;

	scale_up = (*freq == UINT_MAX) ? true : false;
	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
		ret = 0;
		goto out; /* no state change required */
	}
	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);

	start = ktime_get();
	ret = ufshcd_devfreq_scale(hba, scale_up);

	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
		(scale_up ? "up" : "down"),
		ktime_to_us(ktime_sub(ktime_get(), start)), ret);

out:
	if (sched_clk_scaling_suspend_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.suspend_work);

	return ret;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_supported(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};
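
/*
 * With this profile, the devfreq core samples ufshcd_devfreq_get_dev_status()
 * roughly every polling_ms (100 ms here) to obtain the busy/total time
 * window, and calls ufshcd_devfreq_target() whenever the governor decides
 * a frequency change is warranted.
 */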
static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;

	devfreq_suspend_device(hba->devfreq);
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_scaling.window_start_t = 0;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}

static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool suspend = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!hba->clk_scaling.is_suspended) {
		suspend = true;
		hba->clk_scaling.is_suspended = true;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (suspend)
		__ufshcd_suspend_clkscaling(hba);
}

static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
{
	unsigned long flags;
	bool resume = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_scaling.is_suspended) {
		resume = true;
		hba->clk_scaling.is_suspended = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (resume)
		devfreq_resume_device(hba->devfreq);
}
static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
}

static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	u32 value;
	int err;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_scaling.is_allowed)
		goto out;

	pm_runtime_get_sync(hba->dev);
	ufshcd_hold(hba, false);

	cancel_work_sync(&hba->clk_scaling.suspend_work);
	cancel_work_sync(&hba->clk_scaling.resume_work);

	hba->clk_scaling.is_allowed = value;

	if (value) {
		ufshcd_resume_clkscaling(hba);
	} else {
		ufshcd_suspend_clkscaling(hba);
		err = ufshcd_devfreq_scale(hba, true);
		if (err)
			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
					__func__, err);
	}

	ufshcd_release(hba);
	pm_runtime_put_sync(hba->dev);
out:
	return count;
}

static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
{
	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
	hba->clk_scaling.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
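
/*
 * Usage sketch (from userspace): the attribute created above appears as
 * clkscale_enable under the UFS host's sysfs directory. Writing "0"
 * disables clock scaling (after scaling clocks fully up, per the store
 * handler above); writing "1" re-enables it.
 */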
static void ufshcd_ungate_work(struct work_struct *work)
{
	int ret;
	unsigned long flags;
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.ungate_work);

	cancel_delayed_work_sync(&hba->clk_gating.gate_work);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == CLKS_ON) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto unblock_reqs;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ufshcd_setup_clocks(hba, true);

	/* Exit from hibern8 */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		/* Prevent gating in this path */
		hba->clk_gating.is_suspended = true;
		if (ufshcd_is_link_hibern8(hba)) {
			ret = ufshcd_uic_hibern8_exit(hba);
			if (ret)
				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
					__func__, ret);
			else
				ufshcd_set_link_active(hba);
		}
		hba->clk_gating.is_suspended = false;
	}
unblock_reqs:
	scsi_unblock_requests(hba->host);
}
/**
 * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
 * Also, exit from hibern8 mode and set the link as active.
 * @hba: per adapter instance
 * @async: This indicates whether caller should ungate clocks asynchronously.
 */
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
	int rc = 0;
	unsigned long flags;

	if (!ufshcd_is_clkgating_allowed(hba))
		goto out;
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.active_reqs++;

	if (ufshcd_eh_in_progress(hba)) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return 0;
	}

start:
	switch (hba->clk_gating.state) {
	case CLKS_ON:
		/*
		 * Wait for the ungate work to complete if in progress.
		 * Though the clocks may be in ON state, the link could
		 * still be in hibern8 state if hibern8 is allowed
		 * during clock gating.
		 * Make sure we exit hibern8 state also in addition to
		 * clocks being ON.
		 */
		if (ufshcd_can_hibern8_during_gating(hba) &&
		    ufshcd_is_link_hibern8(hba)) {
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			flush_work(&hba->clk_gating.ungate_work);
			spin_lock_irqsave(hba->host->host_lock, flags);
			goto start;
		}
		break;
	case REQ_CLKS_OFF:
		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			break;
		}
		/*
		 * If we are here, it means gating work is either done or
		 * currently running. Hence, fall through to cancel gating
		 * work and to enable clocks.
		 */
	case CLKS_OFF:
		scsi_block_requests(hba->host);
		hba->clk_gating.state = REQ_CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		schedule_work(&hba->clk_gating.ungate_work);
		/*
		 * fall through to check if we should wait for this
		 * work to be done or not.
		 */
	case REQ_CLKS_ON:
		if (async) {
			rc = -EAGAIN;
			hba->clk_gating.active_reqs--;
			break;
		}

		spin_unlock_irqrestore(hba->host->host_lock, flags);
		flush_work(&hba->clk_gating.ungate_work);
		/* Make sure state is CLKS_ON before returning */
		spin_lock_irqsave(hba->host->host_lock, flags);
		goto start;
	default:
		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
			__func__, hba->clk_gating.state);
		break;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
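
/*
 * A successful ufshcd_hold() takes a reference on the gated clocks
 * (clk_gating.active_reqs above); callers must drop it with a matching
 * ufshcd_release() once the hardware access is done, e.g.:
 *
 *	ufshcd_hold(hba, false);
 *	... access host registers / issue commands ...
 *	ufshcd_release(hba);
 */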
static void ufshcd_gate_work(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, struct ufs_hba,
			clk_gating.gate_work.work);
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case save time by
	 * skipping the gating work and exit after changing the clock
	 * state to CLKS_ON.
	 */
	if (hba->clk_gating.is_suspended ||
		(hba->clk_gating.state == REQ_CLKS_ON)) {
		hba->clk_gating.state = CLKS_ON;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
		goto rel_lock;
	}

	if (hba->clk_gating.active_reqs
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done)
		goto rel_lock;

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* put the link into hibern8 mode before turning off clocks */
	if (ufshcd_can_hibern8_during_gating(hba)) {
		if (ufshcd_uic_hibern8_enter(hba)) {
			hba->clk_gating.state = CLKS_ON;
			trace_ufshcd_clk_gating(dev_name(hba->dev),
						hba->clk_gating.state);
			goto out;
		}
		ufshcd_set_link_hibern8(hba);
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	/*
	 * In case you are here to cancel this work the gating state
	 * would be marked as REQ_CLKS_ON. In this case keep the state
	 * as REQ_CLKS_ON which would anyway imply that clocks are off
	 * and a request to turn them on is pending. By doing this way,
	 * we keep the state machine intact and this would ultimately
	 * prevent from doing cancel work multiple times when there are
	 * new requests arriving before the current cancel work is done.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->clk_gating.state == REQ_CLKS_OFF) {
		hba->clk_gating.state = CLKS_OFF;
		trace_ufshcd_clk_gating(dev_name(hba->dev),
					hba->clk_gating.state);
	}
rel_lock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return;
}
/* host lock must be held before calling this variant */
static void __ufshcd_release(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.active_reqs--;

	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
		|| hba->lrb_in_use || hba->outstanding_tasks
		|| hba->active_uic_cmd || hba->uic_async_done
		|| ufshcd_eh_in_progress(hba))
		return;

	hba->clk_gating.state = REQ_CLKS_OFF;
	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
	schedule_delayed_work(&hba->clk_gating.gate_work,
			msecs_to_jiffies(hba->clk_gating.delay_ms));
}

void ufshcd_release(struct ufs_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	__ufshcd_release(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags, value;

	if (kstrtoul(buf, 0, &value))
		return -EINVAL;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->clk_gating.delay_ms = value;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return count;
}

static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long flags;
	u32 value;

	if (kstrtou32(buf, 0, &value))
		return -EINVAL;

	value = !!value;
	if (value == hba->clk_gating.is_enabled)
		goto out;

	if (value) {
		ufshcd_release(hba);
	} else {
		spin_lock_irqsave(hba->host->host_lock, flags);
		hba->clk_gating.active_reqs++;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
	}

	hba->clk_gating.is_enabled = value;
out:
	return count;
}

static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;

	hba->clk_gating.delay_ms = 150;
	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);

	hba->clk_gating.is_enabled = true;

	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
	hba->clk_gating.delay_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");

	hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
	hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
	sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
	hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
	hba->clk_gating.enable_attr.attr.mode = 0644;
	if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
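
/*
 * Usage sketch (from userspace): these attributes appear as
 * clkgate_delay_ms and clkgate_enable under the UFS host's sysfs
 * directory. For example, writing a larger clkgate_delay_ms keeps the
 * clocks on longer after the last request before the gate work
 * (scheduled in __ufshcd_release() above) turns them off.
 */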
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
	if (!ufshcd_is_clkgating_allowed(hba))
		return;
	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
	cancel_work_sync(&hba->clk_gating.ungate_work);
	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
	bool queue_resume_work = false;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->clk_scaling.active_reqs++)
		queue_resume_work = true;

	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
		return;

	if (queue_resume_work)
		queue_work(hba->clk_scaling.workq,
			   &hba->clk_scaling.resume_work);

	if (!hba->clk_scaling.window_start_t) {
		hba->clk_scaling.window_start_t = jiffies;
		hba->clk_scaling.tot_busy_t = 0;
		hba->clk_scaling.is_busy_started = false;
	}

	if (!hba->clk_scaling.is_busy_started) {
		hba->clk_scaling.busy_start_t = ktime_get();
		hba->clk_scaling.is_busy_started = true;
	}
}

static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;

	if (!ufshcd_is_clkscaling_supported(hba))
		return;

	if (!hba->outstanding_reqs && scaling->is_busy_started) {
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));
		scaling->busy_start_t = 0;
		scaling->is_busy_started = false;
	}
}
/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	hba->lrb[task_tag].issue_time_stamp = ktime_get();
	hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
	ufshcd_clk_scaling_start_busy(hba);
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	/* Make sure that doorbell is committed immediately */
	wmb();
	ufshcd_add_command_trace(hba, task_tag, "send");
}
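
/*
 * Ordering note for ufshcd_send_command(): the tag is marked in
 * hba->outstanding_reqs before the doorbell register is written, since the
 * completion path elsewhere in this driver derives completed tags by
 * comparing the doorbell register against outstanding_reqs.
 */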
/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;

	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		int len_to_copy;

		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);

		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
	}
}
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = be16_to_cpu(
				hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}
/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}
/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}
/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}
1910 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
1911 * @hba: per adapter instance
1912 * @uic_cmd: UIC command
1913 * @completion: initialize the completion only if this is set to true
1915 * Identical to ufshcd_send_uic_cmd() except for mutex handling. Must be called
1916 * with mutex held and host_lock locked.
1917 * Returns 0 on success; non-zero otherwise.
1919 static int
1920 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
1921 bool completion)
1923 if (!ufshcd_ready_for_uic_cmd(hba)) {
1924 dev_err(hba->dev,
1925 "Controller not ready to accept UIC commands\n");
1926 return -EIO;
1929 if (completion)
1930 init_completion(&uic_cmd->done);
1932 ufshcd_dispatch_uic_cmd(hba, uic_cmd);
1934 return 0;
1938 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
1939 * @hba: per adapter instance
1940 * @uic_cmd: UIC command
1942 * Returns 0 on success; non-zero otherwise.
1944 static int
1945 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
1947 int ret;
1948 unsigned long flags;
1950 ufshcd_hold(hba, false);
1951 mutex_lock(&hba->uic_cmd_mutex);
1952 ufshcd_add_delay_before_dme_cmd(hba);
1954 spin_lock_irqsave(hba->host->host_lock, flags);
1955 ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
1956 spin_unlock_irqrestore(hba->host->host_lock, flags);
1957 if (!ret)
1958 ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
1960 mutex_unlock(&hba->uic_cmd_mutex);
1962 ufshcd_release(hba);
1963 return ret;
1967 * ufshcd_map_sg - Map scatter-gather list to prdt
1968 * @lrbp - pointer to local reference block
1970 * Returns 0 in case of success, non-zero value in case of failure
1972 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
1974 struct ufshcd_sg_entry *prd_table;
1975 struct scatterlist *sg;
1976 struct scsi_cmnd *cmd;
1977 int sg_segments;
1978 int i;
1980 cmd = lrbp->cmd;
1981 sg_segments = scsi_dma_map(cmd);
1982 if (sg_segments < 0)
1983 return sg_segments;
1985 if (sg_segments) {
1986 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
1987 lrbp->utr_descriptor_ptr->prd_table_length =
1988 cpu_to_le16((u16)(sg_segments *
1989 sizeof(struct ufshcd_sg_entry)));
1990 else
1991 lrbp->utr_descriptor_ptr->prd_table_length =
1992 cpu_to_le16((u16) (sg_segments));
1994 prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
1996 scsi_for_each_sg(cmd, sg, sg_segments, i) {
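/* The PRDT data byte count field is 0-based: a stored value of N means N + 1 bytes, hence the len - 1 below */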
1997 prd_table[i].size =
1998 cpu_to_le32(((u32) sg_dma_len(sg))-1);
1999 prd_table[i].base_addr =
2000 cpu_to_le32(lower_32_bits(sg->dma_address));
2001 prd_table[i].upper_addr =
2002 cpu_to_le32(upper_32_bits(sg->dma_address));
2003 prd_table[i].reserved = 0;
2005 } else {
2006 lrbp->utr_descriptor_ptr->prd_table_length = 0;
2009 return 0;
2013 * ufshcd_enable_intr - enable interrupts
2014 * @hba: per adapter instance
2015 * @intrs: interrupt bits
2017 static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
2019 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2021 if (hba->ufs_version == UFSHCI_VERSION_10) {
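/* On UFSHCI 1.0 only the INTERRUPT_MASK_RW_VER_10 bits of the enable register are read/write; preserve those and fold in the newly requested bits */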
2022 u32 rw;
2023 rw = set & INTERRUPT_MASK_RW_VER_10;
2024 set = rw | ((set ^ intrs) & intrs);
2025 } else {
2026 set |= intrs;
2029 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2033 * ufshcd_disable_intr - disable interrupts
2034 * @hba: per adapter instance
2035 * @intrs: interrupt bits
2037 static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
2039 u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
2041 if (hba->ufs_version == UFSHCI_VERSION_10) {
2042 u32 rw;
2043 rw = (set & INTERRUPT_MASK_RW_VER_10) &
2044 ~(intrs & INTERRUPT_MASK_RW_VER_10);
2045 set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
2047 } else {
2048 set &= ~intrs;
2051 ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
2055 * ufshcd_prepare_req_desc_hdr() - Fills the requests header
2056 * descriptor according to request
2057 * @lrbp: pointer to local reference block
2058 * @upiu_flags: flags required in the header
2059 * @cmd_dir: requests data direction
2061 static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
2062 u32 *upiu_flags, enum dma_data_direction cmd_dir)
2064 struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
2065 u32 data_direction;
2066 u32 dword_0;
2068 if (cmd_dir == DMA_FROM_DEVICE) {
2069 data_direction = UTP_DEVICE_TO_HOST;
2070 *upiu_flags = UPIU_CMD_FLAGS_READ;
2071 } else if (cmd_dir == DMA_TO_DEVICE) {
2072 data_direction = UTP_HOST_TO_DEVICE;
2073 *upiu_flags = UPIU_CMD_FLAGS_WRITE;
2074 } else {
2075 data_direction = UTP_NO_DATA_TRANSFER;
2076 *upiu_flags = UPIU_CMD_FLAGS_NONE;
2079 dword_0 = data_direction | (lrbp->command_type
2080 << UPIU_COMMAND_TYPE_OFFSET);
2081 if (lrbp->intr_cmd)
2082 dword_0 |= UTP_REQ_DESC_INT_CMD;
2084 /* Transfer request descriptor header fields */
2085 req_desc->header.dword_0 = cpu_to_le32(dword_0);
2086 /* dword_1 is reserved, hence it is set to 0 */
2087 req_desc->header.dword_1 = 0;
2089 * Assign an invalid value to the command status; the controller
2090 * updates the OCS field with the actual command status on
2091 * completion.
2093 req_desc->header.dword_2 =
2094 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
2095 /* dword_3 is reserved, hence it is set to 0 */
2096 req_desc->header.dword_3 = 0;
2098 req_desc->prd_table_length = 0;
2102 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
2103 * for scsi commands
2104 * @lrbp - local reference block pointer
2105 * @upiu_flags - flags
2107 static
2108 void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
2110 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2111 unsigned short cdb_len;
2113 /* command descriptor fields */
2114 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2115 UPIU_TRANSACTION_COMMAND, upiu_flags,
2116 lrbp->lun, lrbp->task_tag);
2117 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2118 UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
2120 /* Total EHS length and Data segment length will be zero */
2121 ucd_req_ptr->header.dword_2 = 0;
2123 ucd_req_ptr->sc.exp_data_transfer_len =
2124 cpu_to_be32(lrbp->cmd->sdb.length);
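/* Zero-fill the whole CDB area first so that CDBs shorter than MAX_CDB_SIZE are NUL-padded */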
2126 cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
2127 memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
2128 memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
2130 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2134 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
2135 * for query requests
2136 * @hba: UFS hba
2137 * @lrbp: local reference block pointer
2138 * @upiu_flags: flags
2140 static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
2141 struct ufshcd_lrb *lrbp, u32 upiu_flags)
2143 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2144 struct ufs_query *query = &hba->dev_cmd.query;
2145 u16 len = be16_to_cpu(query->request.upiu_req.length);
2146 u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
2148 /* Query request header */
2149 ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
2150 UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
2151 lrbp->lun, lrbp->task_tag);
2152 ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
2153 0, query->request.query_func, 0, 0);
2155 /* The data segment length is only needed for WRITE_DESC */
2156 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2157 ucd_req_ptr->header.dword_2 =
2158 UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
2159 else
2160 ucd_req_ptr->header.dword_2 = 0;
2162 /* Copy the Query Request buffer as is */
2163 memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
2164 QUERY_OSF_SIZE);
2166 /* Copy the Descriptor */
2167 if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
2168 memcpy(descp, query->descriptor, len);
2170 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2173 static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
2175 struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
2177 memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
2179 /* command descriptor fields */
2180 ucd_req_ptr->header.dword_0 =
2181 UPIU_HEADER_DWORD(
2182 UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
2183 /* clear rest of the fields of basic header */
2184 ucd_req_ptr->header.dword_1 = 0;
2185 ucd_req_ptr->header.dword_2 = 0;
2187 memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
2191 * ufshcd_comp_devman_upiu - compose a UFS Protocol Information Unit (UPIU)
2192 * for device management purposes
2193 * @hba - per adapter instance
2194 * @lrbp: pointer to local reference block
2196 static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2198 u32 upiu_flags;
2199 int ret = 0;
2201 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2202 (hba->ufs_version == UFSHCI_VERSION_11))
2203 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
2204 else
2205 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2207 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
2208 if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
2209 ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
2210 else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
2211 ufshcd_prepare_utp_nop_upiu(lrbp);
2212 else
2213 ret = -EINVAL;
2215 return ret;
2219 * ufshcd_comp_scsi_upiu - compose a UFS Protocol Information Unit (UPIU)
2220 * for SCSI purposes
2221 * @hba - per adapter instance
2222 * @lrbp: pointer to local reference block
2224 static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2226 u32 upiu_flags;
2227 int ret = 0;
2229 if ((hba->ufs_version == UFSHCI_VERSION_10) ||
2230 (hba->ufs_version == UFSHCI_VERSION_11))
2231 lrbp->command_type = UTP_CMD_TYPE_SCSI;
2232 else
2233 lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
2235 if (likely(lrbp->cmd)) {
2236 ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
2237 lrbp->cmd->sc_data_direction);
2238 ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
2239 } else {
2240 ret = -EINVAL;
2243 return ret;
2247 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
2248 * @scsi_lun: scsi LUN id
2250 * Returns UPIU LUN id
2252 static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
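/* Well-known LUNs are marked by setting the UFS_UPIU_WLUN_ID bit on top of the unit number */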
2254 if (scsi_is_wlun(scsi_lun))
2255 return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
2256 | UFS_UPIU_WLUN_ID;
2257 else
2258 return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
2262 * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
2263 * @upiu_wlun_id: UPIU W-LUN id
2265 * Returns SCSI W-LUN id
2267 static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
2269 return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
2273 * ufshcd_queuecommand - main entry point for SCSI requests
2274 * @host: SCSI host pointer
2275 * @cmd: command from SCSI Midlayer
2277 * Returns 0 for success, non-zero in case of failure
2279 static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
2281 struct ufshcd_lrb *lrbp;
2282 struct ufs_hba *hba;
2283 unsigned long flags;
2284 int tag;
2285 int err = 0;
2287 hba = shost_priv(host);
2289 tag = cmd->request->tag;
2290 if (!ufshcd_valid_tag(hba, tag)) {
2291 dev_err(hba->dev,
2292 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
2293 __func__, tag, cmd, cmd->request);
2294 BUG();
2297 if (!down_read_trylock(&hba->clk_scaling_lock))
2298 return SCSI_MLQUEUE_HOST_BUSY;
2300 spin_lock_irqsave(hba->host->host_lock, flags);
2301 switch (hba->ufshcd_state) {
2302 case UFSHCD_STATE_OPERATIONAL:
2303 break;
2304 case UFSHCD_STATE_EH_SCHEDULED:
2305 case UFSHCD_STATE_RESET:
2306 err = SCSI_MLQUEUE_HOST_BUSY;
2307 goto out_unlock;
2308 case UFSHCD_STATE_ERROR:
2309 set_host_byte(cmd, DID_ERROR);
2310 cmd->scsi_done(cmd);
2311 goto out_unlock;
2312 default:
2313 dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
2314 __func__, hba->ufshcd_state);
2315 set_host_byte(cmd, DID_BAD_TARGET);
2316 cmd->scsi_done(cmd);
2317 goto out_unlock;
2320 /* if error handling is in progress, don't issue commands */
2321 if (ufshcd_eh_in_progress(hba)) {
2322 set_host_byte(cmd, DID_ERROR);
2323 cmd->scsi_done(cmd);
2324 goto out_unlock;
2326 spin_unlock_irqrestore(hba->host->host_lock, flags);
2328 hba->req_abort_count = 0;
2330 /* acquire the tag to make sure device cmds don't use it */
2331 if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
2333 * A device management command is in progress; requeue the request.
2334 * Requeuing helps because the requeued request may be assigned a
2335 * different tag instead of waiting for the device management command
2336 * to complete.
2338 err = SCSI_MLQUEUE_HOST_BUSY;
2339 goto out;
2342 err = ufshcd_hold(hba, true);
2343 if (err) {
2344 err = SCSI_MLQUEUE_HOST_BUSY;
2345 clear_bit_unlock(tag, &hba->lrb_in_use);
2346 goto out;
2348 WARN_ON(hba->clk_gating.state != CLKS_ON);
2350 lrbp = &hba->lrb[tag];
2352 WARN_ON(lrbp->cmd);
2353 lrbp->cmd = cmd;
2354 lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
2355 lrbp->sense_buffer = cmd->sense_buffer;
2356 lrbp->task_tag = tag;
2357 lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
2358 lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba);
2359 lrbp->req_abort_skip = false;
2361 ufshcd_comp_scsi_upiu(hba, lrbp);
2363 err = ufshcd_map_sg(hba, lrbp);
2364 if (err) {
2365 lrbp->cmd = NULL;
2366 clear_bit_unlock(tag, &hba->lrb_in_use);
2367 goto out;
2369 /* Make sure descriptors are ready before ringing the doorbell */
2370 wmb();
2372 /* issue command to the controller */
2373 spin_lock_irqsave(hba->host->host_lock, flags);
2374 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2375 ufshcd_send_command(hba, tag);
2376 out_unlock:
2377 spin_unlock_irqrestore(hba->host->host_lock, flags);
2378 out:
2379 up_read(&hba->clk_scaling_lock);
2380 return err;
2383 static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
2384 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
2386 lrbp->cmd = NULL;
2387 lrbp->sense_bufflen = 0;
2388 lrbp->sense_buffer = NULL;
2389 lrbp->task_tag = tag;
2390 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
2391 lrbp->intr_cmd = true; /* No interrupt aggregation */
2392 hba->dev_cmd.type = cmd_type;
2394 return ufshcd_comp_devman_upiu(hba, lrbp);
2397 static int
2398 ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
2400 int err = 0;
2401 unsigned long flags;
2402 u32 mask = 1 << tag;
2404 /* clear outstanding transaction before retry */
2405 spin_lock_irqsave(hba->host->host_lock, flags);
2406 ufshcd_utrl_clear(hba, tag);
2407 spin_unlock_irqrestore(hba->host->host_lock, flags);
2410 * Wait for the h/w to clear the corresponding bit in the door-bell.
2411 * max. wait is 1 sec.
2413 err = ufshcd_wait_for_register(hba,
2414 REG_UTP_TRANSFER_REQ_DOOR_BELL,
2415 mask, ~mask, 1000, 1000, true);
2417 return err;
2420 static int
2421 ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2423 struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
2425 /* Get the UPIU response */
2426 query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
2427 UPIU_RSP_CODE_OFFSET;
2428 return query_res->response;
2432 * ufshcd_dev_cmd_completion() - handles device management command responses
2433 * @hba: per adapter instance
2434 * @lrbp: pointer to local reference block
2436 static int
2437 ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
2439 int resp;
2440 int err = 0;
2442 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
2443 resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
2445 switch (resp) {
2446 case UPIU_TRANSACTION_NOP_IN:
2447 if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
2448 err = -EINVAL;
2449 dev_err(hba->dev, "%s: unexpected response %x\n",
2450 __func__, resp);
2452 break;
2453 case UPIU_TRANSACTION_QUERY_RSP:
2454 err = ufshcd_check_query_response(hba, lrbp);
2455 if (!err)
2456 err = ufshcd_copy_query_response(hba, lrbp);
2457 break;
2458 case UPIU_TRANSACTION_REJECT_UPIU:
2459 /* TODO: handle Reject UPIU Response */
2460 err = -EPERM;
2461 dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
2462 __func__);
2463 break;
2464 default:
2465 err = -EINVAL;
2466 dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
2467 __func__, resp);
2468 break;
2471 return err;
2474 static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
2475 struct ufshcd_lrb *lrbp, int max_timeout)
2477 int err = 0;
2478 unsigned long time_left;
2479 unsigned long flags;
2481 time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
2482 msecs_to_jiffies(max_timeout));
2484 /* Make sure descriptors are ready before ringing the doorbell */
2485 wmb();
2486 spin_lock_irqsave(hba->host->host_lock, flags);
2487 hba->dev_cmd.complete = NULL;
2488 if (likely(time_left)) {
2489 err = ufshcd_get_tr_ocs(lrbp);
2490 if (!err)
2491 err = ufshcd_dev_cmd_completion(hba, lrbp);
2493 spin_unlock_irqrestore(hba->host->host_lock, flags);
2495 if (!time_left) {
2496 err = -ETIMEDOUT;
2497 dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
2498 __func__, lrbp->task_tag);
2499 if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
2500 /* successfully cleared the command, retry if needed */
2501 err = -EAGAIN;
2503 * in case of an error, after clearing the doorbell,
2504 * we also need to clear the outstanding_request
2505 * field in hba
2507 ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
2510 return err;
2514 * ufshcd_get_dev_cmd_tag - Get device management command tag
2515 * @hba: per-adapter instance
2516 * @tag_out: pointer to variable with available slot value
2518 * Get a free slot and lock it until device management command
2519 * completes.
2521 * Returns false if a free slot is unavailable for locking, else
2522 * returns true with the tag value in @tag_out.
2524 static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
2526 int tag;
2527 bool ret = false;
2528 unsigned long tmp;
2530 if (!tag_out)
2531 goto out;
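/* Scan the tag space for a free slot and claim it atomically; loop again if another context wins the race for it */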
2533 do {
2534 tmp = ~hba->lrb_in_use;
2535 tag = find_last_bit(&tmp, hba->nutrs);
2536 if (tag >= hba->nutrs)
2537 goto out;
2538 } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
2540 *tag_out = tag;
2541 ret = true;
2542 out:
2543 return ret;
2546 static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
2548 clear_bit_unlock(tag, &hba->lrb_in_use);
2552 * ufshcd_exec_dev_cmd - API for sending device management requests
2553 * @hba - UFS hba
2554 * @cmd_type - specifies the type (NOP, Query...)
2555 * @timeout - timeout in milliseconds
2557 * NOTE: Since there is only one available tag for device management commands,
2558 * it is expected you hold the hba->dev_cmd.lock mutex.
2560 static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
2561 enum dev_cmd_type cmd_type, int timeout)
2563 struct ufshcd_lrb *lrbp;
2564 int err;
2565 int tag;
2566 struct completion wait;
2567 unsigned long flags;
2569 down_read(&hba->clk_scaling_lock);
2572 * Get free slot, sleep if slots are unavailable.
2573 * Even though we use wait_event() which sleeps indefinitely,
2574 * the maximum wait time is bounded by SCSI request timeout.
2576 wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
2578 init_completion(&wait);
2579 lrbp = &hba->lrb[tag];
2580 WARN_ON(lrbp->cmd);
2581 err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
2582 if (unlikely(err))
2583 goto out_put_tag;
2585 hba->dev_cmd.complete = &wait;
2587 /* Make sure descriptors are ready before ringing the doorbell */
2588 wmb();
2589 spin_lock_irqsave(hba->host->host_lock, flags);
2590 ufshcd_vops_setup_xfer_req(hba, tag, (lrbp->cmd ? true : false));
2591 ufshcd_send_command(hba, tag);
2592 spin_unlock_irqrestore(hba->host->host_lock, flags);
2594 err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
2596 out_put_tag:
2597 ufshcd_put_dev_cmd_tag(hba, tag);
2598 wake_up(&hba->dev_cmd.tag_wq);
2599 up_read(&hba->clk_scaling_lock);
2600 return err;
2604 * ufshcd_init_query() - init the query response and request parameters
2605 * @hba: per-adapter instance
2606 * @request: address of the request pointer to be initialized
2607 * @response: address of the response pointer to be initialized
2608 * @opcode: operation to perform
2609 * @idn: flag idn to access
2610 * @index: LU number to access
2611 * @selector: query/flag/descriptor further identification
2613 static inline void ufshcd_init_query(struct ufs_hba *hba,
2614 struct ufs_query_req **request, struct ufs_query_res **response,
2615 enum query_opcode opcode, u8 idn, u8 index, u8 selector)
2617 *request = &hba->dev_cmd.query.request;
2618 *response = &hba->dev_cmd.query.response;
2619 memset(*request, 0, sizeof(struct ufs_query_req));
2620 memset(*response, 0, sizeof(struct ufs_query_res));
2621 (*request)->upiu_req.opcode = opcode;
2622 (*request)->upiu_req.idn = idn;
2623 (*request)->upiu_req.index = index;
2624 (*request)->upiu_req.selector = selector;
2627 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
2628 enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
2630 int ret;
2631 int retries;
2633 for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
2634 ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
2635 if (ret)
2636 dev_dbg(hba->dev,
2637 "%s: failed with error %d, retries %d\n",
2638 __func__, ret, retries);
2639 else
2640 break;
2643 if (ret)
2644 dev_err(hba->dev,
2645 "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n",
2646 __func__, opcode, idn, ret, retries);
2647 return ret;
2651 * ufshcd_query_flag() - API function for sending flag query requests
2652 * @hba: per-adapter instance
2653 * @opcode: flag query to perform
2654 * @idn: flag idn to access
2655 * @flag_res: the flag value after the query request completes
2657 * Returns 0 for success, non-zero in case of failure
2659 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
2660 enum flag_idn idn, bool *flag_res)
2662 struct ufs_query_req *request = NULL;
2663 struct ufs_query_res *response = NULL;
2664 int err, index = 0, selector = 0;
2665 int timeout = QUERY_REQ_TIMEOUT;
2667 BUG_ON(!hba);
2669 ufshcd_hold(hba, false);
2670 mutex_lock(&hba->dev_cmd.lock);
2671 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2672 selector);
2674 switch (opcode) {
2675 case UPIU_QUERY_OPCODE_SET_FLAG:
2676 case UPIU_QUERY_OPCODE_CLEAR_FLAG:
2677 case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
2678 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2679 break;
2680 case UPIU_QUERY_OPCODE_READ_FLAG:
2681 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2682 if (!flag_res) {
2683 /* No dummy reads */
2684 dev_err(hba->dev, "%s: Invalid argument for read request\n",
2685 __func__);
2686 err = -EINVAL;
2687 goto out_unlock;
2689 break;
2690 default:
2691 dev_err(hba->dev,
2692 "%s: Expected query flag opcode but got = %d\n",
2693 __func__, opcode);
2694 err = -EINVAL;
2695 goto out_unlock;
2698 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
2700 if (err) {
2701 dev_err(hba->dev,
2702 "%s: Sending flag query for idn %d failed, err = %d\n",
2703 __func__, idn, err);
2704 goto out_unlock;
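/* The device reports the flag in the least significant bit of the big-endian value field */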
2707 if (flag_res)
2708 *flag_res = (be32_to_cpu(response->upiu_res.value) &
2709 MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
2711 out_unlock:
2712 mutex_unlock(&hba->dev_cmd.lock);
2713 ufshcd_release(hba);
2714 return err;
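/*
 * Illustrative use (this is how ufshcd_complete_dev_init() below reads the
 * fDeviceInit flag, via the ufshcd_query_flag_retry() wrapper):
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				    QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */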
2718 * ufshcd_query_attr - API function for sending attribute requests
2719 * @hba: per-adapter instance
2720 * @opcode: attribute opcode
2721 * @idn: attribute idn to access
2722 * @index: index field
2723 * @selector: selector field
2724 * @attr_val: the attribute value after the query request completes
2726 * Returns 0 for success, non-zero in case of failure
2728 static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
2729 enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
2731 struct ufs_query_req *request = NULL;
2732 struct ufs_query_res *response = NULL;
2733 int err;
2735 BUG_ON(!hba);
2737 ufshcd_hold(hba, false);
2738 if (!attr_val) {
2739 dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
2740 __func__, opcode);
2741 err = -EINVAL;
2742 goto out;
2745 mutex_lock(&hba->dev_cmd.lock);
2746 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2747 selector);
2749 switch (opcode) {
2750 case UPIU_QUERY_OPCODE_WRITE_ATTR:
2751 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2752 request->upiu_req.value = cpu_to_be32(*attr_val);
2753 break;
2754 case UPIU_QUERY_OPCODE_READ_ATTR:
2755 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2756 break;
2757 default:
2758 dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
2759 __func__, opcode);
2760 err = -EINVAL;
2761 goto out_unlock;
2764 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2766 if (err) {
2767 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2768 __func__, opcode, idn, index, err);
2769 goto out_unlock;
2772 *attr_val = be32_to_cpu(response->upiu_res.value);
2774 out_unlock:
2775 mutex_unlock(&hba->dev_cmd.lock);
2776 out:
2777 ufshcd_release(hba);
2778 return err;
2782 * ufshcd_query_attr_retry() - API function for sending query
2783 * attribute with retries
2784 * @hba: per-adapter instance
2785 * @opcode: attribute opcode
2786 * @idn: attribute idn to access
2787 * @index: index field
2788 * @selector: selector field
2789 * @attr_val: the attribute value after the query request
2790 * completes
2792 * Returns 0 for success, non-zero in case of failure
2794 static int ufshcd_query_attr_retry(struct ufs_hba *hba,
2795 enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
2796 u32 *attr_val)
2798 int ret = 0;
2799 u32 retries;
2801 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2802 ret = ufshcd_query_attr(hba, opcode, idn, index,
2803 selector, attr_val);
2804 if (ret)
2805 dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
2806 __func__, ret, retries);
2807 else
2808 break;
2811 if (ret)
2812 dev_err(hba->dev,
2813 "%s: query attribute, idn %d, failed with error %d after %d retries\n",
2814 __func__, idn, ret, QUERY_REQ_RETRIES);
2815 return ret;
2818 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
2819 enum query_opcode opcode, enum desc_idn idn, u8 index,
2820 u8 selector, u8 *desc_buf, int *buf_len)
2822 struct ufs_query_req *request = NULL;
2823 struct ufs_query_res *response = NULL;
2824 int err;
2826 BUG_ON(!hba);
2828 ufshcd_hold(hba, false);
2829 if (!desc_buf) {
2830 dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
2831 __func__, opcode);
2832 err = -EINVAL;
2833 goto out;
2836 if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
2837 dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
2838 __func__, *buf_len);
2839 err = -EINVAL;
2840 goto out;
2843 mutex_lock(&hba->dev_cmd.lock);
2844 ufshcd_init_query(hba, &request, &response, opcode, idn, index,
2845 selector);
2846 hba->dev_cmd.query.descriptor = desc_buf;
2847 request->upiu_req.length = cpu_to_be16(*buf_len);
2849 switch (opcode) {
2850 case UPIU_QUERY_OPCODE_WRITE_DESC:
2851 request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
2852 break;
2853 case UPIU_QUERY_OPCODE_READ_DESC:
2854 request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
2855 break;
2856 default:
2857 dev_err(hba->dev,
2858 "%s: Expected query descriptor opcode but got = 0x%.2x\n",
2859 __func__, opcode);
2860 err = -EINVAL;
2861 goto out_unlock;
2864 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
2866 if (err) {
2867 dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
2868 __func__, opcode, idn, index, err);
2869 goto out_unlock;
2872 hba->dev_cmd.query.descriptor = NULL;
2873 *buf_len = be16_to_cpu(response->upiu_res.length);
2875 out_unlock:
2876 mutex_unlock(&hba->dev_cmd.lock);
2877 out:
2878 ufshcd_release(hba);
2879 return err;
2883 * ufshcd_query_descriptor_retry - API function for sending descriptor
2884 * requests
2885 * @hba: per-adapter instance
2886 * @opcode: attribute opcode
2887 * @idn: attribute idn to access
2888 * @index: index field
2889 * @selector: selector field
2890 * @desc_buf: the buffer that contains the descriptor
2891 * @buf_len: length parameter passed to the device
2893 * Returns 0 for success, non-zero in case of failure.
2894 * The buf_len parameter will contain, on return, the length parameter
2895 * received on the response.
2897 static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
2898 enum query_opcode opcode,
2899 enum desc_idn idn, u8 index,
2900 u8 selector,
2901 u8 *desc_buf, int *buf_len)
2903 int err;
2904 int retries;
2906 for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
2907 err = __ufshcd_query_descriptor(hba, opcode, idn, index,
2908 selector, desc_buf, buf_len);
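/* -EINVAL means the request itself is malformed and will keep failing, so don't burn the remaining retries on it */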
2909 if (!err || err == -EINVAL)
2910 break;
2913 return err;
2917 * ufshcd_read_desc_length - read the specified descriptor length from header
2918 * @hba: Pointer to adapter instance
2919 * @desc_id: descriptor idn value
2920 * @desc_index: descriptor index
2921 * @desc_length: pointer to variable to read the length of descriptor
2923 * Return 0 in case of success, non-zero otherwise
2925 static int ufshcd_read_desc_length(struct ufs_hba *hba,
2926 enum desc_idn desc_id,
2927 int desc_index,
2928 int *desc_length)
2930 int ret;
2931 u8 header[QUERY_DESC_HDR_SIZE];
2932 int header_len = QUERY_DESC_HDR_SIZE;
2934 if (desc_id >= QUERY_DESC_IDN_MAX)
2935 return -EINVAL;
2937 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
2938 desc_id, desc_index, 0, header,
2939 &header_len);
2941 if (ret) {
2942 dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
2943 __func__, desc_id);
2944 return ret;
2945 } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
2946 dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
2947 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
2948 desc_id);
2949 ret = -EINVAL;
2952 *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
2953 return ret;
2958 * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
2959 * @hba: Pointer to adapter instance
2960 * @desc_id: descriptor idn value
2961 * @desc_len: mapped desc length (out)
2963 * Return 0 in case of success, non-zero otherwise
2965 int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
2966 enum desc_idn desc_id, int *desc_len)
2968 switch (desc_id) {
2969 case QUERY_DESC_IDN_DEVICE:
2970 *desc_len = hba->desc_size.dev_desc;
2971 break;
2972 case QUERY_DESC_IDN_POWER:
2973 *desc_len = hba->desc_size.pwr_desc;
2974 break;
2975 case QUERY_DESC_IDN_GEOMETRY:
2976 *desc_len = hba->desc_size.geom_desc;
2977 break;
2978 case QUERY_DESC_IDN_CONFIGURATION:
2979 *desc_len = hba->desc_size.conf_desc;
2980 break;
2981 case QUERY_DESC_IDN_UNIT:
2982 *desc_len = hba->desc_size.unit_desc;
2983 break;
2984 case QUERY_DESC_IDN_INTERCONNECT:
2985 *desc_len = hba->desc_size.interc_desc;
2986 break;
2987 case QUERY_DESC_IDN_STRING:
2988 *desc_len = QUERY_DESC_MAX_SIZE;
2989 break;
2990 case QUERY_DESC_IDN_RFU_0:
2991 case QUERY_DESC_IDN_RFU_1:
2992 *desc_len = 0;
2993 break;
2994 default:
2995 *desc_len = 0;
2996 return -EINVAL;
2998 return 0;
3000 EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
3003 * ufshcd_read_desc_param - read the specified descriptor parameter
3004 * @hba: Pointer to adapter instance
3005 * @desc_id: descriptor idn value
3006 * @desc_index: descriptor index
3007 * @param_offset: offset of the parameter to read
3008 * @param_read_buf: pointer to buffer where parameter would be read
3009 * @param_size: sizeof(param_read_buf)
3011 * Return 0 in case of success, non-zero otherwise
3013 static int ufshcd_read_desc_param(struct ufs_hba *hba,
3014 enum desc_idn desc_id,
3015 int desc_index,
3016 u8 param_offset,
3017 u8 *param_read_buf,
3018 u8 param_size)
3020 int ret;
3021 u8 *desc_buf;
3022 int buff_len;
3023 bool is_kmalloc = true;
3025 /* Safety check */
3026 if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
3027 return -EINVAL;
3029 /* Get the max length of descriptor from structure filled up at probe
3030 * time.
3032 ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
3034 /* Sanity checks */
3035 if (ret || !buff_len) {
3036 dev_err(hba->dev, "%s: Failed to get full descriptor length",
3037 __func__);
3038 return ret;
3041 /* A temporary buffer is needed when reading at an offset or into a buffer smaller than the full descriptor */
3042 if (param_offset != 0 || param_size < buff_len) {
3043 desc_buf = kmalloc(buff_len, GFP_KERNEL);
3044 if (!desc_buf)
3045 return -ENOMEM;
3046 } else {
3047 desc_buf = param_read_buf;
3048 is_kmalloc = false;
3051 /* Request for full descriptor */
3052 ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
3053 desc_id, desc_index, 0,
3054 desc_buf, &buff_len);
3056 if (ret) {
3057 dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
3058 __func__, desc_id, desc_index, param_offset, ret);
3059 goto out;
3062 /* Sanity check */
3063 if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
3064 dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
3065 __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
3066 ret = -EINVAL;
3067 goto out;
3070 /* Make sure we do not copy more data than is available */
3071 if (is_kmalloc && param_size > buff_len)
3072 param_size = buff_len;
3074 if (is_kmalloc)
3075 memcpy(param_read_buf, &desc_buf[param_offset], param_size);
3076 out:
3077 if (is_kmalloc)
3078 kfree(desc_buf);
3079 return ret;
3082 static inline int ufshcd_read_desc(struct ufs_hba *hba,
3083 enum desc_idn desc_id,
3084 int desc_index,
3085 u8 *buf,
3086 u32 size)
3088 return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
3091 static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
3092 u8 *buf,
3093 u32 size)
3095 return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
3098 static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
3100 return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
3104 * ufshcd_read_string_desc - read string descriptor
3105 * @hba: pointer to adapter instance
3106 * @desc_index: descriptor index
3107 * @buf: pointer to buffer where descriptor would be read
3108 * @size: size of buf
3109 * @ascii: if true convert from unicode to ascii characters
3111 * Return 0 in case of success, non-zero otherwise
3113 #define ASCII_STD true
3114 static int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
3115 u8 *buf, u32 size, bool ascii)
3117 int err = 0;
3119 err = ufshcd_read_desc(hba,
3120 QUERY_DESC_IDN_STRING, desc_index, buf, size);
3122 if (err) {
3123 dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
3124 __func__, QUERY_REQ_RETRIES, err);
3125 goto out;
3128 if (ascii) {
3129 int desc_len;
3130 int ascii_len;
3131 int i;
3132 char *buff_ascii;
3134 desc_len = buf[0];
3135 /* remove header and divide by 2 to move from UTF16 to UTF8 */
3136 ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
3137 if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
3138 dev_err(hba->dev, "%s: buffer allocated size is too small\n",
3139 __func__);
3140 err = -ENOMEM;
3141 goto out;
3144 buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
3145 if (!buff_ascii) {
3146 err = -ENOMEM;
3147 goto out;
3151 * the descriptor contains string in UTF16 format
3152 * we need to convert to utf-8 so it can be displayed
3154 utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
3155 desc_len - QUERY_DESC_HDR_SIZE,
3156 UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
3158 /* replace non-printable or non-ASCII characters with spaces */
3159 for (i = 0; i < ascii_len; i++)
3160 ufshcd_remove_non_printable(&buff_ascii[i]);
3162 memset(buf + QUERY_DESC_HDR_SIZE, 0,
3163 size - QUERY_DESC_HDR_SIZE);
3164 memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
3165 buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
3166 kfree(buff_ascii);
3168 out:
3169 return err;
3173 * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
3174 * @hba: Pointer to adapter instance
3175 * @lun: lun id
3176 * @param_offset: offset of the parameter to read
3177 * @param_read_buf: pointer to buffer where parameter would be read
3178 * @param_size: sizeof(param_read_buf)
3180 * Return 0 in case of success, non-zero otherwise
3182 static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
3183 int lun,
3184 enum unit_desc_param param_offset,
3185 u8 *param_read_buf,
3186 u32 param_size)
3189 * Unit descriptors are only available for general purpose LUs (LUN id
3190 * from 0 to 7) and RPMB Well known LU.
3192 if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
3193 return -EOPNOTSUPP;
3195 return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
3196 param_offset, param_read_buf, param_size);
3200 * ufshcd_memory_alloc - allocate memory for host memory space data structures
3201 * @hba: per adapter instance
3203 * 1. Allocate DMA memory for Command Descriptor array
3204 * Each command descriptor consist of Command UPIU, Response UPIU and PRDT
3205 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
3206 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
3207 * (UTMRDL)
3208 * 4. Allocate memory for local reference block(lrb).
3210 * Returns 0 for success, non-zero in case of failure
3212 static int ufshcd_memory_alloc(struct ufs_hba *hba)
3214 size_t utmrdl_size, utrdl_size, ucdl_size;
3216 /* Allocate memory for UTP command descriptors */
3217 ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
3218 hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
3219 ucdl_size,
3220 &hba->ucdl_dma_addr,
3221 GFP_KERNEL);
3224 * UFSHCI requires the UTP command descriptor to be 128 byte aligned.
3225 * Make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE:
3226 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it is
3227 * aligned to 128 bytes as well.
3229 if (!hba->ucdl_base_addr ||
3230 WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
3231 dev_err(hba->dev,
3232 "Command Descriptor Memory allocation failed\n");
3233 goto out;
3237 * Allocate memory for UTP Transfer descriptors
3238 * UFSHCI requires 1024 byte alignment of UTRD
3240 utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
3241 hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
3242 utrdl_size,
3243 &hba->utrdl_dma_addr,
3244 GFP_KERNEL);
3245 if (!hba->utrdl_base_addr ||
3246 WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
3247 dev_err(hba->dev,
3248 "Transfer Descriptor Memory allocation failed\n");
3249 goto out;
3253 * Allocate memory for UTP Task Management descriptors
3254 * UFSHCI requires 1024 byte alignment of UTMRD
3256 utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
3257 hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
3258 utmrdl_size,
3259 &hba->utmrdl_dma_addr,
3260 GFP_KERNEL);
3261 if (!hba->utmrdl_base_addr ||
3262 WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
3263 dev_err(hba->dev,
3264 "Task Management Descriptor Memory allocation failed\n");
3265 goto out;
3268 /* Allocate memory for local reference block */
3269 hba->lrb = devm_kzalloc(hba->dev,
3270 hba->nutrs * sizeof(struct ufshcd_lrb),
3271 GFP_KERNEL);
3272 if (!hba->lrb) {
3273 dev_err(hba->dev, "LRB Memory allocation failed\n");
3274 goto out;
3276 return 0;
3277 out:
3278 return -ENOMEM;
3282 * ufshcd_host_memory_configure - configure local reference block with
3283 * memory offsets
3284 * @hba: per adapter instance
3286 * Configure Host memory space
3287 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
3288 * address.
3289 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
3290 * and PRDT offset.
3291 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
3292 * into local reference block.
3294 static void ufshcd_host_memory_configure(struct ufs_hba *hba)
3296 struct utp_transfer_cmd_desc *cmd_descp;
3297 struct utp_transfer_req_desc *utrdlp;
3298 dma_addr_t cmd_desc_dma_addr;
3299 dma_addr_t cmd_desc_element_addr;
3300 u16 response_offset;
3301 u16 prdt_offset;
3302 int cmd_desc_size;
3303 int i;
3305 utrdlp = hba->utrdl_base_addr;
3306 cmd_descp = hba->ucdl_base_addr;
3308 response_offset =
3309 offsetof(struct utp_transfer_cmd_desc, response_upiu);
3310 prdt_offset =
3311 offsetof(struct utp_transfer_cmd_desc, prd_table);
3313 cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
3314 cmd_desc_dma_addr = hba->ucdl_dma_addr;
3316 for (i = 0; i < hba->nutrs; i++) {
3317 /* Configure UTRD with command descriptor base address */
3318 cmd_desc_element_addr =
3319 (cmd_desc_dma_addr + (cmd_desc_size * i));
3320 utrdlp[i].command_desc_base_addr_lo =
3321 cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
3322 utrdlp[i].command_desc_base_addr_hi =
3323 cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
3325 /* Response UPIU and PRDT offsets are in dwords (hence the >> 2), unless the PRDT_BYTE_GRAN quirk requires byte granularity */
3326 if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) {
3327 utrdlp[i].response_upiu_offset =
3328 cpu_to_le16(response_offset);
3329 utrdlp[i].prd_table_offset =
3330 cpu_to_le16(prdt_offset);
3331 utrdlp[i].response_upiu_length =
3332 cpu_to_le16(ALIGNED_UPIU_SIZE);
3333 } else {
3334 utrdlp[i].response_upiu_offset =
3335 cpu_to_le16((response_offset >> 2));
3336 utrdlp[i].prd_table_offset =
3337 cpu_to_le16((prdt_offset >> 2));
3338 utrdlp[i].response_upiu_length =
3339 cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
3342 hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
3343 hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
3344 (i * sizeof(struct utp_transfer_req_desc));
3345 hba->lrb[i].ucd_req_ptr =
3346 (struct utp_upiu_req *)(cmd_descp + i);
3347 hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
3348 hba->lrb[i].ucd_rsp_ptr =
3349 (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
3350 hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
3351 response_offset;
3352 hba->lrb[i].ucd_prdt_ptr =
3353 (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
3354 hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
3355 prdt_offset;
3360 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
3361 * @hba: per adapter instance
3363 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
3364 * in order to initialize the Unipro link startup procedure.
3365 * Once the Unipro links are up, the device connected to the controller
3366 * is detected.
3368 * Returns 0 on success, non-zero value on failure
3370 static int ufshcd_dme_link_startup(struct ufs_hba *hba)
3372 struct uic_command uic_cmd = {0};
3373 int ret;
3375 uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
3377 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3378 if (ret)
3379 dev_dbg(hba->dev,
3380 "dme-link-startup: error code %d\n", ret);
3381 return ret;
3384 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
3386 #define MIN_DELAY_BEFORE_DME_CMDS_US 1000
3387 unsigned long min_sleep_time_us;
3389 if (!(hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS))
3390 return;
3393 * last_dme_cmd_tstamp will be 0 only for 1st call to
3394 * this function
3396 if (unlikely(!ktime_to_us(hba->last_dme_cmd_tstamp))) {
3397 min_sleep_time_us = MIN_DELAY_BEFORE_DME_CMDS_US;
3398 } else {
3399 unsigned long delta =
3400 (unsigned long) ktime_to_us(
3401 ktime_sub(ktime_get(),
3402 hba->last_dme_cmd_tstamp));
3404 if (delta < MIN_DELAY_BEFORE_DME_CMDS_US)
3405 min_sleep_time_us =
3406 MIN_DELAY_BEFORE_DME_CMDS_US - delta;
3407 else
3408 return; /* no more delay required */
3411 /* allow sleep for extra 50us if needed */
3412 usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
3416 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
3417 * @hba: per adapter instance
3418 * @attr_sel: uic command argument1
3419 * @attr_set: attribute set type as uic command argument2
3420 * @mib_val: setting value as uic command argument3
3421 * @peer: indicate whether peer or local
3423 * Returns 0 on success, non-zero value on failure
3425 int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
3426 u8 attr_set, u32 mib_val, u8 peer)
3428 struct uic_command uic_cmd = {0};
3429 static const char *const action[] = {
3430 "dme-set",
3431 "dme-peer-set"
3433 const char *set = action[!!peer];
3434 int ret;
3435 int retries = UFS_UIC_COMMAND_RETRIES;
3437 uic_cmd.command = peer ?
3438 UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
3439 uic_cmd.argument1 = attr_sel;
3440 uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
3441 uic_cmd.argument3 = mib_val;
3443 do {
3444 /* for peer attributes we retry upon failure */
3445 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3446 if (ret)
3447 dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
3448 set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
3449 } while (ret && peer && --retries);
3451 if (ret)
3452 dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
3453 set, UIC_GET_ATTR_ID(attr_sel), mib_val,
3454 UFS_UIC_COMMAND_RETRIES - retries);
3456 return ret;
3458 EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
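/*
 * Callers normally go through the ufshcd_dme_set()/ufshcd_dme_peer_set()
 * convenience wrappers; e.g. the power mode code below does:
 *
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
 */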
3461 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
3462 * @hba: per adapter instance
3463 * @attr_sel: uic command argument1
3464 * @mib_val: the value of the attribute as returned by the UIC command
3465 * @peer: indicate whether peer or local
3467 * Returns 0 on success, non-zero value on failure
3469 int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
3470 u32 *mib_val, u8 peer)
3472 struct uic_command uic_cmd = {0};
3473 static const char *const action[] = {
3474 "dme-get",
3475 "dme-peer-get"
3477 const char *get = action[!!peer];
3478 int ret;
3479 int retries = UFS_UIC_COMMAND_RETRIES;
3480 struct ufs_pa_layer_attr orig_pwr_info;
3481 struct ufs_pa_layer_attr temp_pwr_info;
3482 bool pwr_mode_change = false;
3484 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
3485 orig_pwr_info = hba->pwr_info;
3486 temp_pwr_info = orig_pwr_info;
3488 if (orig_pwr_info.pwr_tx == FAST_MODE ||
3489 orig_pwr_info.pwr_rx == FAST_MODE) {
3490 temp_pwr_info.pwr_tx = FASTAUTO_MODE;
3491 temp_pwr_info.pwr_rx = FASTAUTO_MODE;
3492 pwr_mode_change = true;
3493 } else if (orig_pwr_info.pwr_tx == SLOW_MODE ||
3494 orig_pwr_info.pwr_rx == SLOW_MODE) {
3495 temp_pwr_info.pwr_tx = SLOWAUTO_MODE;
3496 temp_pwr_info.pwr_rx = SLOWAUTO_MODE;
3497 pwr_mode_change = true;
3499 if (pwr_mode_change) {
3500 ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
3501 if (ret)
3502 goto out;
3506 uic_cmd.command = peer ?
3507 UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
3508 uic_cmd.argument1 = attr_sel;
3510 do {
3511 /* for peer attributes we retry upon failure */
3512 ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
3513 if (ret)
3514 dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
3515 get, UIC_GET_ATTR_ID(attr_sel), ret);
3516 } while (ret && peer && --retries);
3518 if (ret)
3519 dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
3520 get, UIC_GET_ATTR_ID(attr_sel),
3521 UFS_UIC_COMMAND_RETRIES - retries);
3523 if (mib_val && !ret)
3524 *mib_val = uic_cmd.argument3;
3526 if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
3527 && pwr_mode_change)
3528 ufshcd_change_power_mode(hba, &orig_pwr_info);
3529 out:
3530 return ret;
3532 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
3535 * ufshcd_uic_pwr_ctrl - execute UIC commands that affect the link power
3536 * state and wait for them to take effect.
3538 * @hba: per adapter instance
3539 * @cmd: UIC command to execute
3541 * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
3542 * DME_HIBERNATE_EXIT commands take some time to take effect on both the
3543 * host and device UniPro links, hence their final completion is indicated
3544 * by dedicated status bits in the Interrupt Status register (UPMS, UHES,
3545 * UHXS) in addition to the normal UIC command completion status (UCCS).
3546 * This function only returns after the relevant status bits indicate completion.
3548 * Returns 0 on success, non-zero value on failure
3550 static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
3552 struct completion uic_async_done;
3553 unsigned long flags;
3554 u8 status;
3555 int ret;
3556 bool reenable_intr = false;
3558 mutex_lock(&hba->uic_cmd_mutex);
3559 init_completion(&uic_async_done);
3560 ufshcd_add_delay_before_dme_cmd(hba);
3562 spin_lock_irqsave(hba->host->host_lock, flags);
3563 hba->uic_async_done = &uic_async_done;
3564 if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
3565 ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
3567 * Make sure UIC command completion interrupt is disabled before
3568 * issuing UIC command.
3570 wmb();
3571 reenable_intr = true;
3573 ret = __ufshcd_send_uic_cmd(hba, cmd, false);
3574 spin_unlock_irqrestore(hba->host->host_lock, flags);
3575 if (ret) {
3576 dev_err(hba->dev,
3577 "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
3578 cmd->command, cmd->argument3, ret);
3579 goto out;
3582 if (!wait_for_completion_timeout(hba->uic_async_done,
3583 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
3584 dev_err(hba->dev,
3585 "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
3586 cmd->command, cmd->argument3);
3587 ret = -ETIMEDOUT;
3588 goto out;
3591 status = ufshcd_get_upmcrs(hba);
3592 if (status != PWR_LOCAL) {
3593 dev_err(hba->dev,
3594 "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
3595 cmd->command, status);
3596 ret = (status != PWR_OK) ? status : -1;
3598 out:
3599 if (ret) {
3600 ufshcd_print_host_state(hba);
3601 ufshcd_print_pwr_info(hba);
3602 ufshcd_print_host_regs(hba);
3605 spin_lock_irqsave(hba->host->host_lock, flags);
3606 hba->active_uic_cmd = NULL;
3607 hba->uic_async_done = NULL;
3608 if (reenable_intr)
3609 ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
3610 spin_unlock_irqrestore(hba->host->host_lock, flags);
3611 mutex_unlock(&hba->uic_cmd_mutex);
3613 return ret;
3617 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
3618 * using DME_SET primitives.
3619 * @hba: per adapter instance
3620 * @mode: power mode value
3622 * Returns 0 on success, non-zero value on failure
3624 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
3626 struct uic_command uic_cmd = {0};
3627 int ret;
3629 if (hba->quirks & UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP) {
3630 ret = ufshcd_dme_set(hba,
3631 UIC_ARG_MIB_SEL(PA_RXHSUNTERMCAP, 0), 1);
3632 if (ret) {
3633 dev_err(hba->dev, "%s: failed to enable PA_RXHSUNTERMCAP ret %d\n",
3634 __func__, ret);
3635 goto out;
3639 uic_cmd.command = UIC_CMD_DME_SET;
3640 uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
3641 uic_cmd.argument3 = mode;
3642 ufshcd_hold(hba, false);
3643 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3644 ufshcd_release(hba);
3646 out:
3647 return ret;
3650 static int ufshcd_link_recovery(struct ufs_hba *hba)
3652 int ret;
3653 unsigned long flags;
3655 spin_lock_irqsave(hba->host->host_lock, flags);
3656 hba->ufshcd_state = UFSHCD_STATE_RESET;
3657 ufshcd_set_eh_in_progress(hba);
3658 spin_unlock_irqrestore(hba->host->host_lock, flags);
3660 ret = ufshcd_host_reset_and_restore(hba);
3662 spin_lock_irqsave(hba->host->host_lock, flags);
3663 if (ret)
3664 hba->ufshcd_state = UFSHCD_STATE_ERROR;
3665 ufshcd_clear_eh_in_progress(hba);
3666 spin_unlock_irqrestore(hba->host->host_lock, flags);
3668 if (ret)
3669 dev_err(hba->dev, "%s: link recovery failed, err %d",
3670 __func__, ret);
3672 return ret;
3675 static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3677 int ret;
3678 struct uic_command uic_cmd = {0};
3679 ktime_t start = ktime_get();
3681 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, PRE_CHANGE);
3683 uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
3684 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3685 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
3686 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3688 if (ret) {
3689 dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
3690 __func__, ret);
3693 * If link recovery fails then return an error so that the caller
3694 * doesn't retry the hibern8 enter again.
3696 if (ufshcd_link_recovery(hba))
3697 ret = -ENOLINK;
3698 } else
3699 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
3700 POST_CHANGE);
3702 return ret;
3705 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
3707 int ret = 0, retries;
3709 for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
3710 ret = __ufshcd_uic_hibern8_enter(hba);
3711 if (!ret || ret == -ENOLINK)
3712 goto out;
3714 out:
3715 return ret;
3718 static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
3720 struct uic_command uic_cmd = {0};
3721 int ret;
3722 ktime_t start = ktime_get();
3724 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT, PRE_CHANGE);
3726 uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
3727 ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
3728 trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
3729 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
3731 if (ret) {
3732 dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
3733 __func__, ret);
3734 ret = ufshcd_link_recovery(hba);
3735 } else {
3736 ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
3737 POST_CHANGE);
3738 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
3739 hba->ufs_stats.hibern8_exit_cnt++;
3742 return ret;
3746 * ufshcd_init_pwr_info - setting the POR (power on reset)
3747 * values in hba power info
3748 * @hba: per-adapter instance
3750 static void ufshcd_init_pwr_info(struct ufs_hba *hba)
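/* After power-on reset the link runs in its lowest common mode: PWM gear 1, a single lane in each direction, Slow-Auto */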
3752 hba->pwr_info.gear_rx = UFS_PWM_G1;
3753 hba->pwr_info.gear_tx = UFS_PWM_G1;
3754 hba->pwr_info.lane_rx = 1;
3755 hba->pwr_info.lane_tx = 1;
3756 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
3757 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
3758 hba->pwr_info.hs_rate = 0;
3762 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
3763 * @hba: per-adapter instance
3765 static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
3767 struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
3769 if (hba->max_pwr_info.is_valid)
3770 return 0;
3772 pwr_info->pwr_tx = FAST_MODE;
3773 pwr_info->pwr_rx = FAST_MODE;
3774 pwr_info->hs_rate = PA_HS_MODE_B;
3776 /* Get the connected lane count */
3777 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
3778 &pwr_info->lane_rx);
3779 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
3780 &pwr_info->lane_tx);
3782 if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
3783 dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
3784 __func__,
3785 pwr_info->lane_rx,
3786 pwr_info->lane_tx);
3787 return -EINVAL;
3791 * First, get the maximum gear for HS speed.
3792 * A zero value means there is no HS gear capability,
3793 * so fall back to the maximum gear for PWM speed.
3795 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
3796 if (!pwr_info->gear_rx) {
3797 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3798 &pwr_info->gear_rx);
3799 if (!pwr_info->gear_rx) {
3800 dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
3801 __func__, pwr_info->gear_rx);
3802 return -EINVAL;
3804 pwr_info->pwr_rx = SLOW_MODE;
3807 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
3808 &pwr_info->gear_tx);
3809 if (!pwr_info->gear_tx) {
3810 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
3811 &pwr_info->gear_tx);
3812 if (!pwr_info->gear_tx) {
3813 dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
3814 __func__, pwr_info->gear_tx);
3815 return -EINVAL;
3817 pwr_info->pwr_tx = SLOW_MODE;
3820 hba->max_pwr_info.is_valid = true;
3821 return 0;
3824 static int ufshcd_change_power_mode(struct ufs_hba *hba,
3825 struct ufs_pa_layer_attr *pwr_mode)
3827 int ret;
3829 /* if already configured to the requested pwr_mode */
3830 if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
3831 pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
3832 pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
3833 pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
3834 pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
3835 pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
3836 pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
3837 dev_dbg(hba->dev, "%s: power already configured\n", __func__);
3838 return 0;
3842 * Configure the attributes for the power mode change as listed below:
3843 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
3844 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
3845 * - PA_HSSERIES
3847 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
3848 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
3849 pwr_mode->lane_rx);
3850 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3851 pwr_mode->pwr_rx == FAST_MODE)
3852 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
3853 else
3854 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
3856 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
3857 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
3858 pwr_mode->lane_tx);
3859 if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
3860 pwr_mode->pwr_tx == FAST_MODE)
3861 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
3862 else
3863 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
3865 if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
3866 pwr_mode->pwr_tx == FASTAUTO_MODE ||
3867 pwr_mode->pwr_rx == FAST_MODE ||
3868 pwr_mode->pwr_tx == FAST_MODE)
3869 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
3870 pwr_mode->hs_rate);
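/*
 * Note: the mode argument passed below packs both directions into one
 * byte - the RX mode in the upper nibble and the TX mode in the lower
 * nibble - which is why the two values are shifted and OR'ed together.
 */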
3872 ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
3873 | pwr_mode->pwr_tx);
3875 if (ret) {
3876 dev_err(hba->dev,
3877 "%s: power mode change failed %d\n", __func__, ret);
3878 } else {
3879 ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
3880 pwr_mode);
3882 memcpy(&hba->pwr_info, pwr_mode,
3883 sizeof(struct ufs_pa_layer_attr));
3886 return ret;
3890 * ufshcd_config_pwr_mode - configure a new power mode
3891 * @hba: per-adapter instance
3892 * @desired_pwr_mode: desired power configuration
3894 static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
3895 struct ufs_pa_layer_attr *desired_pwr_mode)
3897 struct ufs_pa_layer_attr final_params = { 0 };
3898 int ret;
3900 ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
3901 desired_pwr_mode, &final_params);
3903 if (ret)
3904 memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
3906 ret = ufshcd_change_power_mode(hba, &final_params);
3907 if (!ret)
3908 ufshcd_print_pwr_info(hba);
3910 return ret;
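/*
 * Typical call sequence (a sketch of how these helpers are used during
 * initialization): first negotiate the maximum capability, then program
 * it, letting the vendor hook in ufshcd_config_pwr_mode() veto or
 * adjust the request:
 *
 * if (!ufshcd_get_max_pwr_mode(hba))
 * ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */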
3914 * ufshcd_complete_dev_init() - checks device readiness
3915 * @hba: per-adapter instance
3917 * Set fDeviceInit flag and poll until device toggles it.
3919 static int ufshcd_complete_dev_init(struct ufs_hba *hba)
3921 int i;
3922 int err;
3923 bool flag_res = true;
3925 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
3926 QUERY_FLAG_IDN_FDEVICEINIT, NULL);
3927 if (err) {
3928 dev_err(hba->dev,
3929 "%s setting fDeviceInit flag failed with error %d\n",
3930 __func__, err);
3931 goto out;
3934 /* poll for max. 1000 iterations for fDeviceInit flag to clear */
3935 for (i = 0; i < 1000 && !err && flag_res; i++)
3936 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
3937 QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
3939 if (err)
3940 dev_err(hba->dev,
3941 "%s reading fDeviceInit flag failed with error %d\n",
3942 __func__, err);
3943 else if (flag_res)
3944 dev_err(hba->dev,
3945 "%s fDeviceInit was not cleared by the device\n",
3946 __func__);
3948 out:
3949 return err;
3953 * ufshcd_make_hba_operational - Make UFS controller operational
3954 * @hba: per adapter instance
3956 * To bring the UFS host controller to an operational state:
3957 * 1. Enable required interrupts
3958 * 2. Configure interrupt aggregation
3959 * 3. Program UTRL and UTMRL base address
3960 * 4. Configure run-stop-registers
3962 * Returns 0 on success, non-zero value on failure
3964 static int ufshcd_make_hba_operational(struct ufs_hba *hba)
3966 int err = 0;
3967 u32 reg;
3969 /* Enable required interrupts */
3970 ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
3972 /* Configure interrupt aggregation */
3973 if (ufshcd_is_intr_aggr_allowed(hba))
3974 ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
3975 else
3976 ufshcd_disable_intr_aggr(hba);
3978 /* Configure UTRL and UTMRL base address registers */
3979 ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
3980 REG_UTP_TRANSFER_REQ_LIST_BASE_L);
3981 ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
3982 REG_UTP_TRANSFER_REQ_LIST_BASE_H);
3983 ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
3984 REG_UTP_TASK_REQ_LIST_BASE_L);
3985 ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
3986 REG_UTP_TASK_REQ_LIST_BASE_H);
3989 * Make sure base address and interrupt setup are updated before
3990 * enabling the run/stop registers below.
3992 wmb();
3995 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
3997 reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
3998 if (!(ufshcd_get_lists_status(reg))) {
3999 ufshcd_enable_run_stop_reg(hba);
4000 } else {
4001 dev_err(hba->dev,
4002 "Host controller not ready to process requests");
4003 err = -EIO;
4004 goto out;
4007 out:
4008 return err;
4012 * ufshcd_hba_stop - Send controller to reset state
4013 * @hba: per adapter instance
4014 * @can_sleep: perform sleep or just spin
4016 static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
4018 int err;
4020 ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
4021 err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
4022 CONTROLLER_ENABLE, CONTROLLER_DISABLE,
4023 10, 1, can_sleep);
4024 if (err)
4025 dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
4029 * ufshcd_hba_enable - initialize the controller
4030 * @hba: per adapter instance
4032 * The controller resets itself and the controller firmware initialization
4033 * sequence kicks off. When the controller is ready it will set
4034 * the Host Controller Enable bit to 1.
4036 * Returns 0 on success, non-zero value on failure
4038 static int ufshcd_hba_enable(struct ufs_hba *hba)
4040 int retry;
4043 * The msleep(1) and msleep(5) calls in this function can stretch to
4044 * msleep(20) on some platforms, but they were necessary to put the UFS
4045 * FPGA into reset mode during development and testing of this driver.
4046 * msleep can be changed to mdelay and the retry count reduced per controller.
4048 if (!ufshcd_is_hba_active(hba))
4049 /* change controller state to "reset state" */
4050 ufshcd_hba_stop(hba, true);
4052 /* UniPro link is disabled at this point */
4053 ufshcd_set_link_off(hba);
4055 ufshcd_vops_hce_enable_notify(hba, PRE_CHANGE);
4057 /* start controller initialization sequence */
4058 ufshcd_hba_start(hba);
4061 * To initialize a UFS host controller, the HCE bit must be set to 1.
4062 * During initialization the HCE bit value changes from 1->0->1.
4063 * When the host controller completes the initialization sequence
4064 * it sets the value of the HCE bit to 1. The same HCE bit is read back
4065 * to check if the controller has completed the initialization sequence.
4066 * So without this delay the HCE = 1 value, set by the previous
4067 * instruction, might be read back prematurely.
4068 * This delay can be changed based on the controller.
4070 msleep(1);
4072 /* wait for the host controller to complete initialization */
4073 retry = 10;
4074 while (ufshcd_is_hba_active(hba)) {
4075 if (retry) {
4076 retry--;
4077 } else {
4078 dev_err(hba->dev,
4079 "Controller enable failed\n");
4080 return -EIO;
4082 msleep(5);
4085 /* enable UIC related interrupts */
4086 ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
4088 ufshcd_vops_hce_enable_notify(hba, POST_CHANGE);
4090 return 0;
4093 static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
4095 int tx_lanes, i, err = 0;
4097 if (!peer)
4098 ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4099 &tx_lanes);
4100 else
4101 ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
4102 &tx_lanes);
4103 for (i = 0; i < tx_lanes; i++) {
4104 if (!peer)
4105 err = ufshcd_dme_set(hba,
4106 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4107 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4108 0);
4109 else
4110 err = ufshcd_dme_peer_set(hba,
4111 UIC_ARG_MIB_SEL(TX_LCC_ENABLE,
4112 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)),
4113 0);
4114 if (err) {
4115 dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d",
4116 __func__, peer, i, err);
4117 break;
4121 return err;
4124 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
4126 return ufshcd_disable_tx_lcc(hba, true);
4130 * ufshcd_link_startup - Initialize unipro link startup
4131 * @hba: per adapter instance
4133 * Returns 0 for success, non-zero in case of failure
4135 static int ufshcd_link_startup(struct ufs_hba *hba)
4137 int ret;
4138 int retries = DME_LINKSTARTUP_RETRIES;
4139 bool link_startup_again = false;
4142 * If UFS device isn't active then we will have to issue link startup
4143 * 2 times to make sure the device state moves to active.
4145 if (!ufshcd_is_ufs_dev_active(hba))
4146 link_startup_again = true;
4148 link_startup:
4149 do {
4150 ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
4152 ret = ufshcd_dme_link_startup(hba);
4154 /* check if device is detected by inter-connect layer */
4155 if (!ret && !ufshcd_is_device_present(hba)) {
4156 dev_err(hba->dev, "%s: Device not present\n", __func__);
4157 ret = -ENXIO;
4158 goto out;
4162 * DME link lost indication is only received when link is up,
4163 * but we can't be sure if the link is up until link startup
4164 * succeeds. So reset the local Uni-Pro and try again.
4166 if (ret && ufshcd_hba_enable(hba))
4167 goto out;
4168 } while (ret && retries--);
4170 if (ret)
4171 /* failed to get the link up... retire */
4172 goto out;
4174 if (link_startup_again) {
4175 link_startup_again = false;
4176 retries = DME_LINKSTARTUP_RETRIES;
4177 goto link_startup;
4180 /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
4181 ufshcd_init_pwr_info(hba);
4182 ufshcd_print_pwr_info(hba);
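/*
 * LCC (Line Control Command) frames transmitted by the peer can confuse
 * host controllers that do not implement LCC handling; such hosts set
 * UFSHCD_QUIRK_BROKEN_LCC so the device's TX LCC is switched off below.
 */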
4184 if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
4185 ret = ufshcd_disable_device_tx_lcc(hba);
4186 if (ret)
4187 goto out;
4190 /* Include any host controller configuration via UIC commands */
4191 ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
4192 if (ret)
4193 goto out;
4195 ret = ufshcd_make_hba_operational(hba);
4196 out:
4197 if (ret) {
4198 dev_err(hba->dev, "link startup failed %d\n", ret);
4199 ufshcd_print_host_state(hba);
4200 ufshcd_print_pwr_info(hba);
4201 ufshcd_print_host_regs(hba);
4203 return ret;
4207 * ufshcd_verify_dev_init() - Verify device initialization
4208 * @hba: per-adapter instance
4210 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
4211 * device Transport Protocol (UTP) layer is ready after a reset.
4212 * If the UTP layer at the device side is not initialized, it may
4213 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
4214 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
4216 static int ufshcd_verify_dev_init(struct ufs_hba *hba)
4218 int err = 0;
4219 int retries;
4221 ufshcd_hold(hba, false);
4222 mutex_lock(&hba->dev_cmd.lock);
4223 for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
4224 err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
4225 NOP_OUT_TIMEOUT);
4227 if (!err || err == -ETIMEDOUT)
4228 break;
4230 dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
4232 mutex_unlock(&hba->dev_cmd.lock);
4233 ufshcd_release(hba);
4235 if (err)
4236 dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
4237 return err;
4241 * ufshcd_set_queue_depth - set lun queue depth
4242 * @sdev: pointer to SCSI device
4244 * Read bLUQueueDepth value and activate scsi tagged command
4245 * queueing. For WLUN, queue depth is set to 1. For best-effort
4246 * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
4247 * value that the host can queue.
4249 static void ufshcd_set_queue_depth(struct scsi_device *sdev)
4251 int ret = 0;
4252 u8 lun_qdepth;
4253 struct ufs_hba *hba;
4255 hba = shost_priv(sdev->host);
4257 lun_qdepth = hba->nutrs;
4258 ret = ufshcd_read_unit_desc_param(hba,
4259 ufshcd_scsi_to_upiu_lun(sdev->lun),
4260 UNIT_DESC_PARAM_LU_Q_DEPTH,
4261 &lun_qdepth,
4262 sizeof(lun_qdepth));
4264 /* Some WLUNs don't support the unit descriptor */
4265 if (ret == -EOPNOTSUPP)
4266 lun_qdepth = 1;
4267 else if (!lun_qdepth)
4268 /* eventually, we can figure out the real queue depth */
4269 lun_qdepth = hba->nutrs;
4270 else
4271 lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
4273 dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
4274 __func__, lun_qdepth);
4275 scsi_change_queue_depth(sdev, lun_qdepth);
4279 * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
4280 * @hba: per-adapter instance
4281 * @lun: UFS device lun id
4282 * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
4284 * Returns 0 in case of success, with the write protect status returned in
4285 * the @b_lu_write_protect parameter.
4286 * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
4287 * Returns -EINVAL in case of invalid parameters passed to this function.
4289 static int ufshcd_get_lu_wp(struct ufs_hba *hba,
4290 u8 lun,
4291 u8 *b_lu_write_protect)
4293 int ret;
4295 if (!b_lu_write_protect)
4296 ret = -EINVAL;
4298 * According to UFS device spec, RPMB LU can't be write
4299 * protected so skip reading bLUWriteProtect parameter for
4300 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
4302 else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
4303 ret = -ENOTSUPP;
4304 else
4305 ret = ufshcd_read_unit_desc_param(hba,
4306 lun,
4307 UNIT_DESC_PARAM_LU_WR_PROTECT,
4308 b_lu_write_protect,
4309 sizeof(*b_lu_write_protect));
4310 return ret;
4314 * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
4315 * status
4316 * @hba: per-adapter instance
4317 * @sdev: pointer to SCSI device
4320 static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
4321 struct scsi_device *sdev)
4323 if (hba->dev_info.f_power_on_wp_en &&
4324 !hba->dev_info.is_lu_power_on_wp) {
4325 u8 b_lu_write_protect;
4327 if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
4328 &b_lu_write_protect) &&
4329 (b_lu_write_protect == UFS_LU_POWER_ON_WP))
4330 hba->dev_info.is_lu_power_on_wp = true;
4335 * ufshcd_slave_alloc - handle initial SCSI device configurations
4336 * @sdev: pointer to SCSI device
4338 * Returns success
4340 static int ufshcd_slave_alloc(struct scsi_device *sdev)
4342 struct ufs_hba *hba;
4344 hba = shost_priv(sdev->host);
4346 /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
4347 sdev->use_10_for_ms = 1;
4349 /* allow SCSI layer to restart the device in case of errors */
4350 sdev->allow_restart = 1;
4352 /* REPORT SUPPORTED OPERATION CODES is not supported */
4353 sdev->no_report_opcodes = 1;
4355 /* WRITE_SAME command is not supported */
4356 sdev->no_write_same = 1;
4358 ufshcd_set_queue_depth(sdev);
4360 ufshcd_get_lu_power_on_wp_status(hba, sdev);
4362 return 0;
4366 * ufshcd_change_queue_depth - change queue depth
4367 * @sdev: pointer to SCSI device
4368 * @depth: required depth to set
4370 * Change queue depth and make sure the max. limits are not crossed.
4372 static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
4374 struct ufs_hba *hba = shost_priv(sdev->host);
4376 if (depth > hba->nutrs)
4377 depth = hba->nutrs;
4378 return scsi_change_queue_depth(sdev, depth);
4382 * ufshcd_slave_configure - adjust SCSI device configurations
4383 * @sdev: pointer to SCSI device
4385 static int ufshcd_slave_configure(struct scsi_device *sdev)
4387 struct request_queue *q = sdev->request_queue;
4389 blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
4390 blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
4392 return 0;
4396 * ufshcd_slave_destroy - remove SCSI device configurations
4397 * @sdev: pointer to SCSI device
4399 static void ufshcd_slave_destroy(struct scsi_device *sdev)
4401 struct ufs_hba *hba;
4403 hba = shost_priv(sdev->host);
4404 /* Drop the reference as it won't be needed anymore */
4405 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
4406 unsigned long flags;
4408 spin_lock_irqsave(hba->host->host_lock, flags);
4409 hba->sdev_ufs_device = NULL;
4410 spin_unlock_irqrestore(hba->host->host_lock, flags);
4415 * ufshcd_task_req_compl - handle task management request completion
4416 * @hba: per adapter instance
4417 * @index: index of the completed request
4418 * @resp: task management service response
4420 * Returns non-zero value on error, zero on success
4422 static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
4424 struct utp_task_req_desc *task_req_descp;
4425 struct utp_upiu_task_rsp *task_rsp_upiup;
4426 unsigned long flags;
4427 int ocs_value;
4428 int task_result;
4430 spin_lock_irqsave(hba->host->host_lock, flags);
4432 /* Clear completed tasks from outstanding_tasks */
4433 __clear_bit(index, &hba->outstanding_tasks);
4435 task_req_descp = hba->utmrdl_base_addr;
4436 ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
4438 if (ocs_value == OCS_SUCCESS) {
4439 task_rsp_upiup = (struct utp_upiu_task_rsp *)
4440 task_req_descp[index].task_rsp_upiu;
4441 task_result = be32_to_cpu(task_rsp_upiup->output_param1);
4442 task_result = task_result & MASK_TM_SERVICE_RESP;
4443 if (resp)
4444 *resp = (u8)task_result;
4445 } else {
4446 dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
4447 __func__, ocs_value);
4449 spin_unlock_irqrestore(hba->host->host_lock, flags);
4451 return ocs_value;
4455 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
4456 * @lrb: pointer to local reference block of completed command
4457 * @scsi_status: SCSI command status
4459 * Returns a value based on the SCSI command status
4461 static inline int
4462 ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
4464 int result = 0;
4466 switch (scsi_status) {
4467 case SAM_STAT_CHECK_CONDITION:
4468 ufshcd_copy_sense_data(lrbp);
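/* fallthrough - CHECK_CONDITION also completes with DID_OK */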
4469 case SAM_STAT_GOOD:
4470 result |= DID_OK << 16 |
4471 COMMAND_COMPLETE << 8 |
4472 scsi_status;
4473 break;
4474 case SAM_STAT_TASK_SET_FULL:
4475 case SAM_STAT_BUSY:
4476 case SAM_STAT_TASK_ABORTED:
4477 ufshcd_copy_sense_data(lrbp);
4478 result |= scsi_status;
4479 break;
4480 default:
4481 result |= DID_ERROR << 16;
4482 break;
4483 } /* end of switch */
4485 return result;
4489 * ufshcd_transfer_rsp_status - Get overall status of the response
4490 * @hba: per adapter instance
4491 * @lrb: pointer to local reference block of completed command
4493 * Returns result of the command to notify SCSI midlayer
4495 static inline int
4496 ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
4498 int result = 0;
4499 int scsi_status;
4500 int ocs;
4502 /* overall command status of utrd */
4503 ocs = ufshcd_get_tr_ocs(lrbp);
4505 switch (ocs) {
4506 case OCS_SUCCESS:
4507 result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
4508 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
4509 switch (result) {
4510 case UPIU_TRANSACTION_RESPONSE:
4512 * get the response UPIU result to extract
4513 * the SCSI command status
4515 result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
4518 * get the result based on SCSI status response
4519 * to notify the SCSI midlayer of the command status
4521 scsi_status = result & MASK_SCSI_STATUS;
4522 result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
4525 * Currently we are only supporting BKOPs exception
4526 * events hence we can ignore BKOPs exception event
4527 * during power management callbacks. BKOPs exception
4528 * event is not expected to be raised in runtime suspend
4529 * callback as it allows the urgent bkops.
4530 * During system suspend, we are anyway forcefully
4531 * disabling the bkops and if urgent bkops is needed
4532 * it will be enabled on system resume. Long term
4533 * solution could be to abort the system suspend if
4534 * UFS device needs urgent BKOPs.
4536 if (!hba->pm_op_in_progress &&
4537 ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
4538 schedule_work(&hba->eeh_work);
4539 break;
4540 case UPIU_TRANSACTION_REJECT_UPIU:
4541 /* TODO: handle Reject UPIU Response */
4542 result = DID_ERROR << 16;
4543 dev_err(hba->dev,
4544 "Reject UPIU not fully implemented\n");
4545 break;
4546 default:
4547 dev_err(hba->dev,
4548 "Unexpected request response code = %x\n",
4549 result);
4550 result = DID_ERROR << 16;
4551 break;
4553 break;
4554 case OCS_ABORTED:
4555 result |= DID_ABORT << 16;
4556 break;
4557 case OCS_INVALID_COMMAND_STATUS:
4558 result |= DID_REQUEUE << 16;
4559 break;
4560 case OCS_INVALID_CMD_TABLE_ATTR:
4561 case OCS_INVALID_PRDT_ATTR:
4562 case OCS_MISMATCH_DATA_BUF_SIZE:
4563 case OCS_MISMATCH_RESP_UPIU_SIZE:
4564 case OCS_PEER_COMM_FAILURE:
4565 case OCS_FATAL_ERROR:
4566 default:
4567 result |= DID_ERROR << 16;
4568 dev_err(hba->dev,
4569 "OCS error from controller = %x for tag %d\n",
4570 ocs, lrbp->task_tag);
4571 ufshcd_print_host_regs(hba);
4572 ufshcd_print_host_state(hba);
4573 break;
4574 } /* end of switch */
4576 if (host_byte(result) != DID_OK)
4577 ufshcd_print_trs(hba, 1 << lrbp->task_tag, true);
4578 return result;
4582 * ufshcd_uic_cmd_compl - handle completion of uic command
4583 * @hba: per adapter instance
4584 * @intr_status: interrupt status generated by the controller
4586 static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
4588 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
4589 hba->active_uic_cmd->argument2 |=
4590 ufshcd_get_uic_cmd_result(hba);
4591 hba->active_uic_cmd->argument3 =
4592 ufshcd_get_dme_attr_val(hba);
4593 complete(&hba->active_uic_cmd->done);
4596 if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
4597 complete(hba->uic_async_done);
4601 * __ufshcd_transfer_req_compl - handle SCSI and query command completion
4602 * @hba: per adapter instance
4603 * @completed_reqs: requests to complete
4605 static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
4606 unsigned long completed_reqs)
4608 struct ufshcd_lrb *lrbp;
4609 struct scsi_cmnd *cmd;
4610 int result;
4611 int index;
4613 for_each_set_bit(index, &completed_reqs, hba->nutrs) {
4614 lrbp = &hba->lrb[index];
4615 cmd = lrbp->cmd;
4616 if (cmd) {
4617 ufshcd_add_command_trace(hba, index, "complete");
4618 result = ufshcd_transfer_rsp_status(hba, lrbp);
4619 scsi_dma_unmap(cmd);
4620 cmd->result = result;
4621 /* Mark completed command as NULL in LRB */
4622 lrbp->cmd = NULL;
4623 clear_bit_unlock(index, &hba->lrb_in_use);
4624 /* Do not touch lrbp after scsi done */
4625 cmd->scsi_done(cmd);
4626 __ufshcd_release(hba);
4627 } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
4628 lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
4629 if (hba->dev_cmd.complete) {
4630 ufshcd_add_command_trace(hba, index,
4631 "dev_complete");
4632 complete(hba->dev_cmd.complete);
4635 if (ufshcd_is_clkscaling_supported(hba))
4636 hba->clk_scaling.active_reqs--;
4638 lrbp->compl_time_stamp = ktime_get();
4641 /* clear corresponding bits of completed commands */
4642 hba->outstanding_reqs ^= completed_reqs;
4644 ufshcd_clk_scaling_update_busy(hba);
4646 /* we might have free'd some tags above */
4647 wake_up(&hba->dev_cmd.tag_wq);
4651 * ufshcd_transfer_req_compl - handle SCSI and query command completion
4652 * @hba: per adapter instance
4654 static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
4656 unsigned long completed_reqs;
4657 u32 tr_doorbell;
4659 /* Resetting interrupt aggregation counters first and reading the
4660 * DOOR_BELL afterward allows us to handle all the completed requests.
4661 * To prevent starvation of other interrupts the DB is read only once
4662 * after reset. The downside of this solution is the possibility of a
4663 * false interrupt if the device completes another request after
4664 * resetting aggregation and before reading the DB.
4666 if (ufshcd_is_intr_aggr_allowed(hba))
4667 ufshcd_reset_intr_aggr(hba);
4669 tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
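/*
 * A request is complete when its bit is still set in outstanding_reqs
 * but already cleared in the doorbell; the XOR below yields exactly
 * that set of tags.
 */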
4670 completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
4672 __ufshcd_transfer_req_compl(hba, completed_reqs);
4676 * ufshcd_disable_ee - disable exception event
4677 * @hba: per-adapter instance
4678 * @mask: exception event to disable
4680 * Disables exception event in the device so that the EVENT_ALERT
4681 * bit is not set.
4683 * Returns zero on success, non-zero error value on failure.
4685 static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
4687 int err = 0;
4688 u32 val;
4690 if (!(hba->ee_ctrl_mask & mask))
4691 goto out;
4693 val = hba->ee_ctrl_mask & ~mask;
4694 val &= MASK_EE_STATUS;
4695 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4696 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4697 if (!err)
4698 hba->ee_ctrl_mask &= ~mask;
4699 out:
4700 return err;
4704 * ufshcd_enable_ee - enable exception event
4705 * @hba: per-adapter instance
4706 * @mask: exception event to enable
4708 * Enable corresponding exception event in the device to allow
4709 * device to alert host in critical scenarios.
4711 * Returns zero on success, non-zero error value on failure.
4713 static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
4715 int err = 0;
4716 u32 val;
4718 if (hba->ee_ctrl_mask & mask)
4719 goto out;
4721 val = hba->ee_ctrl_mask | mask;
4722 val &= MASK_EE_STATUS;
4723 err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
4724 QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
4725 if (!err)
4726 hba->ee_ctrl_mask |= mask;
4727 out:
4728 return err;
4732 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
4733 * @hba: per-adapter instance
4735 * Allow device to manage background operations on its own. Enabling
4736 * this might lead to inconsistent latencies during normal data transfers
4737 * as the device is allowed to manage its own way of handling background
4738 * operations.
4740 * Returns zero on success, non-zero on failure.
4742 static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
4744 int err = 0;
4746 if (hba->auto_bkops_enabled)
4747 goto out;
4749 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
4750 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4751 if (err) {
4752 dev_err(hba->dev, "%s: failed to enable bkops %d\n",
4753 __func__, err);
4754 goto out;
4757 hba->auto_bkops_enabled = true;
4758 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Enabled");
4760 /* No need of URGENT_BKOPS exception from the device */
4761 err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4762 if (err)
4763 dev_err(hba->dev, "%s: failed to disable exception event %d\n",
4764 __func__, err);
4765 out:
4766 return err;
4770 * ufshcd_disable_auto_bkops - block device in doing background operations
4771 * @hba: per-adapter instance
4773 * Disabling background operations improves command response latency but
4774 * has the drawback of the device moving into a critical state where it is
4775 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
4776 * host is idle so that BKOPS are managed effectively without any negative
4777 * impacts.
4779 * Returns zero on success, non-zero on failure.
4781 static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
4783 int err = 0;
4785 if (!hba->auto_bkops_enabled)
4786 goto out;
4789 * If host assisted BKOPs is to be enabled, make sure
4790 * urgent bkops exception is allowed.
4792 err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
4793 if (err) {
4794 dev_err(hba->dev, "%s: failed to enable exception event %d\n",
4795 __func__, err);
4796 goto out;
4799 err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
4800 QUERY_FLAG_IDN_BKOPS_EN, NULL);
4801 if (err) {
4802 dev_err(hba->dev, "%s: failed to disable bkops %d\n",
4803 __func__, err);
4804 ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
4805 goto out;
4808 hba->auto_bkops_enabled = false;
4809 trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled");
4810 out:
4811 return err;
4815 * ufshcd_force_reset_auto_bkops - force reset auto bkops state
4816 * @hba: per adapter instance
4818 * After a device reset the device may toggle the BKOPS_EN flag
4819 * to default value. The s/w tracking variables should be updated
4820 * as well. This function would change the auto-bkops state based on
4821 * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
4823 static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
4825 if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
4826 hba->auto_bkops_enabled = false;
4827 hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
4828 ufshcd_enable_auto_bkops(hba);
4829 } else {
4830 hba->auto_bkops_enabled = true;
4831 hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
4832 ufshcd_disable_auto_bkops(hba);
4836 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
4838 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
4839 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
4843 * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
4844 * @hba: per-adapter instance
4845 * @status: bkops_status value
4847 * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
4848 * flag in the device to permit background operations if the device
4849 * bkops_status is greater than or equal to "status" argument passed to
4850 * this function, disable otherwise.
4852 * Returns 0 for success, non-zero in case of failure.
4854 * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
4855 * to know whether auto bkops is enabled or disabled after this function
4856 * returns control to it.
4858 static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
4859 enum bkops_status status)
4861 int err;
4862 u32 curr_status = 0;
4864 err = ufshcd_get_bkops_status(hba, &curr_status);
4865 if (err) {
4866 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4867 __func__, err);
4868 goto out;
4869 } else if (curr_status > BKOPS_STATUS_MAX) {
4870 dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
4871 __func__, curr_status);
4872 err = -EINVAL;
4873 goto out;
4876 if (curr_status >= status)
4877 err = ufshcd_enable_auto_bkops(hba);
4878 else
4879 err = ufshcd_disable_auto_bkops(hba);
4880 out:
4881 return err;
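/*
 * Usage sketch (illustrative): power management paths call this with the
 * urgency threshold they can tolerate, e.g.
 *
 * ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 *
 * enables auto-bkops only once the device reports that performance is
 * already impacted, and disables it otherwise.
 */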
4885 * ufshcd_urgent_bkops - handle urgent bkops exception event
4886 * @hba: per-adapter instance
4888 * Enable fBackgroundOpsEn flag in the device to permit background
4889 * operations.
4891 * Returns 0 if BKOPS is enabled, 1 if it is not enabled, and a negative
4892 * error value for any other failure.
4894 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
4896 return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
4899 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
4901 return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
4902 QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
4905 static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
4907 int err;
4908 u32 curr_status = 0;
4910 if (hba->is_urgent_bkops_lvl_checked)
4911 goto enable_auto_bkops;
4913 err = ufshcd_get_bkops_status(hba, &curr_status);
4914 if (err) {
4915 dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
4916 __func__, err);
4917 goto out;
4921 * We are seeing that some devices are raising the urgent bkops
4922 * exception events even when BKOPS status doesn't indicate performance
4923 * impacted or critical. Handle such devices by determining their urgent
4924 * bkops status at runtime.
4926 if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
4927 dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
4928 __func__, curr_status);
4929 /* update the current status as the urgent bkops level */
4930 hba->urgent_bkops_lvl = curr_status;
4931 hba->is_urgent_bkops_lvl_checked = true;
4934 enable_auto_bkops:
4935 err = ufshcd_enable_auto_bkops(hba);
4936 out:
4937 if (err < 0)
4938 dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
4939 __func__, err);
4943 * ufshcd_exception_event_handler - handle exceptions raised by device
4944 * @work: pointer to work data
4946 * Read bExceptionEventStatus attribute from the device and handle the
4947 * exception event accordingly.
4949 static void ufshcd_exception_event_handler(struct work_struct *work)
4951 struct ufs_hba *hba;
4952 int err;
4953 u32 status = 0;
4954 hba = container_of(work, struct ufs_hba, eeh_work);
4956 pm_runtime_get_sync(hba->dev);
4957 err = ufshcd_get_ee_status(hba, &status);
4958 if (err) {
4959 dev_err(hba->dev, "%s: failed to get exception status %d\n",
4960 __func__, err);
4961 goto out;
4964 status &= hba->ee_ctrl_mask;
4966 if (status & MASK_EE_URGENT_BKOPS)
4967 ufshcd_bkops_exception_event_handler(hba);
4969 out:
4970 pm_runtime_put_sync(hba->dev);
4971 return;
4974 /* Complete requests that have door-bell cleared */
4975 static void ufshcd_complete_requests(struct ufs_hba *hba)
4977 ufshcd_transfer_req_compl(hba);
4978 ufshcd_tmc_handler(hba);
4982 * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
4983 * is required to recover from the DL NAC errors.
4984 * @hba: per-adapter instance
4986 * Returns true if error handling is required, false otherwise
4988 static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
4990 unsigned long flags;
4991 bool err_handling = true;
4993 spin_lock_irqsave(hba->host->host_lock, flags);
4995 * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only workaround the
4996 * device fatal error and/or DL NAC & REPLAY timeout errors.
4998 if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
4999 goto out;
5001 if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
5002 ((hba->saved_err & UIC_ERROR) &&
5003 (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
5004 goto out;
5006 if ((hba->saved_err & UIC_ERROR) &&
5007 (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
5008 int err;
5010 * wait for 50ms to see if we can get any other errors or not.
5012 spin_unlock_irqrestore(hba->host->host_lock, flags);
5013 msleep(50);
5014 spin_lock_irqsave(hba->host->host_lock, flags);
5017 * now check whether we have received any other severe errors
5018 * besides the DL NAC error.
5020 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5021 ((hba->saved_err & UIC_ERROR) &&
5022 (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
5023 goto out;
5026 * As DL NAC is the only error received so far, send out NOP
5027 * command to confirm if link is still active or not.
5028 * - If we don't get any response then do error recovery.
5029 * - If we get response then clear the DL NAC error bit.
5032 spin_unlock_irqrestore(hba->host->host_lock, flags);
5033 err = ufshcd_verify_dev_init(hba);
5034 spin_lock_irqsave(hba->host->host_lock, flags);
5036 if (err)
5037 goto out;
5039 /* Link seems to be alive hence ignore the DL NAC errors */
5040 if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
5041 hba->saved_err &= ~UIC_ERROR;
5042 /* clear NAC error */
5043 hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5044 if (!hba->saved_uic_err) {
5045 err_handling = false;
5046 goto out;
5049 out:
5050 spin_unlock_irqrestore(hba->host->host_lock, flags);
5051 return err_handling;
5055 * ufshcd_err_handler - handle UFS errors that require s/w attention
5056 * @work: pointer to work structure
5058 static void ufshcd_err_handler(struct work_struct *work)
5060 struct ufs_hba *hba;
5061 unsigned long flags;
5062 u32 err_xfer = 0;
5063 u32 err_tm = 0;
5064 int err = 0;
5065 int tag;
5066 bool needs_reset = false;
5068 hba = container_of(work, struct ufs_hba, eh_work);
5070 pm_runtime_get_sync(hba->dev);
5071 ufshcd_hold(hba, false);
5073 spin_lock_irqsave(hba->host->host_lock, flags);
5074 if (hba->ufshcd_state == UFSHCD_STATE_RESET)
5075 goto out;
5077 hba->ufshcd_state = UFSHCD_STATE_RESET;
5078 ufshcd_set_eh_in_progress(hba);
5080 /* Complete requests that have door-bell cleared by h/w */
5081 ufshcd_complete_requests(hba);
5083 if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5084 bool ret;
5086 spin_unlock_irqrestore(hba->host->host_lock, flags);
5087 /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
5088 ret = ufshcd_quirk_dl_nac_errors(hba);
5089 spin_lock_irqsave(hba->host->host_lock, flags);
5090 if (!ret)
5091 goto skip_err_handling;
5093 if ((hba->saved_err & INT_FATAL_ERRORS) ||
5094 ((hba->saved_err & UIC_ERROR) &&
5095 (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
5096 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
5097 UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
5098 needs_reset = true;
5101 * if host reset is required then skip clearing the pending
5102 * transfers forcefully because they will automatically get
5103 * cleared after link startup.
5105 if (needs_reset)
5106 goto skip_pending_xfer_clear;
5108 /* release lock as clear command might sleep */
5109 spin_unlock_irqrestore(hba->host->host_lock, flags);
5110 /* Clear pending transfer requests */
5111 for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
5112 if (ufshcd_clear_cmd(hba, tag)) {
5113 err_xfer = true;
5114 goto lock_skip_pending_xfer_clear;
5118 /* Clear pending task management requests */
5119 for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
5120 if (ufshcd_clear_tm_cmd(hba, tag)) {
5121 err_tm = true;
5122 goto lock_skip_pending_xfer_clear;
5126 lock_skip_pending_xfer_clear:
5127 spin_lock_irqsave(hba->host->host_lock, flags);
5129 /* Complete the requests that are cleared by s/w */
5130 ufshcd_complete_requests(hba);
5132 if (err_xfer || err_tm)
5133 needs_reset = true;
5135 skip_pending_xfer_clear:
5136 /* Fatal errors need reset */
5137 if (needs_reset) {
5138 unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
5141 * ufshcd_reset_and_restore() does the link reinitialization
5142 * which will need at least one empty doorbell slot to send the
5143 * device management commands (NOP and query commands).
5144 * If no slot is empty at this moment then forcefully free up the
5145 * last slot.
5147 if (hba->outstanding_reqs == max_doorbells)
5148 __ufshcd_transfer_req_compl(hba,
5149 (1UL << (hba->nutrs - 1)));
5151 spin_unlock_irqrestore(hba->host->host_lock, flags);
5152 err = ufshcd_reset_and_restore(hba);
5153 spin_lock_irqsave(hba->host->host_lock, flags);
5154 if (err) {
5155 dev_err(hba->dev, "%s: reset and restore failed\n",
5156 __func__);
5157 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5160 * Inform scsi mid-layer that we did reset and allow to handle
5161 * Unit Attention properly.
5163 scsi_report_bus_reset(hba->host, 0);
5164 hba->saved_err = 0;
5165 hba->saved_uic_err = 0;
5168 skip_err_handling:
5169 if (!needs_reset) {
5170 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5171 if (hba->saved_err || hba->saved_uic_err)
5172 dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
5173 __func__, hba->saved_err, hba->saved_uic_err);
5176 ufshcd_clear_eh_in_progress(hba);
5178 out:
5179 spin_unlock_irqrestore(hba->host->host_lock, flags);
5180 scsi_unblock_requests(hba->host);
5181 ufshcd_release(hba);
5182 pm_runtime_put_sync(hba->dev);
5185 static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
5186 u32 reg)
5188 reg_hist->reg[reg_hist->pos] = reg;
5189 reg_hist->tstamp[reg_hist->pos] = ktime_get();
5190 reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
5194 * ufshcd_update_uic_error - check and set fatal UIC error flags.
5195 * @hba: per-adapter instance
5197 static void ufshcd_update_uic_error(struct ufs_hba *hba)
5199 u32 reg;
5201 /* PHY layer lane error */
5202 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
5203 /* Ignore LINERESET indication, as this is not an error */
5204 if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
5205 (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
5207 * To know whether this error is fatal or not, DB timeout
5208 * must be checked but this error is handled separately.
5210 dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
5211 ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
5214 /* PA_INIT_ERROR is fatal and needs UIC reset */
5215 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
5216 if (reg)
5217 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
5219 if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
5220 hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
5221 else if (hba->dev_quirks &
5222 UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
5223 if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
5224 hba->uic_error |=
5225 UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
5226 else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
5227 hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
5230 /* UIC NL/TL/DME errors need a software retry */
5231 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
5232 if (reg) {
5233 ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
5234 hba->uic_error |= UFSHCD_UIC_NL_ERROR;
5237 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
5238 if (reg) {
5239 ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
5240 hba->uic_error |= UFSHCD_UIC_TL_ERROR;
5243 reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
5244 if (reg) {
5245 ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
5246 hba->uic_error |= UFSHCD_UIC_DME_ERROR;
5249 dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
5250 __func__, hba->uic_error);
5254 * ufshcd_check_errors - Check for errors that need s/w attention
5255 * @hba: per-adapter instance
5257 static void ufshcd_check_errors(struct ufs_hba *hba)
5259 bool queue_eh_work = false;
5261 if (hba->errors & INT_FATAL_ERRORS)
5262 queue_eh_work = true;
5264 if (hba->errors & UIC_ERROR) {
5265 hba->uic_error = 0;
5266 ufshcd_update_uic_error(hba);
5267 if (hba->uic_error)
5268 queue_eh_work = true;
5271 if (queue_eh_work) {
5273 * update the transfer error masks to sticky bits, let's do this
5274 * irrespective of current ufshcd_state.
5276 hba->saved_err |= hba->errors;
5277 hba->saved_uic_err |= hba->uic_error;
5279 /* handle fatal errors only when link is functional */
5280 if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
5281 /* block commands from scsi mid-layer */
5282 scsi_block_requests(hba->host);
5284 hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
5286 /* dump controller state before resetting */
5287 if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
5288 bool pr_prdt = !!(hba->saved_err &
5289 SYSTEM_BUS_FATAL_ERROR);
5291 dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
5292 __func__, hba->saved_err,
5293 hba->saved_uic_err);
5295 ufshcd_print_host_regs(hba);
5296 ufshcd_print_pwr_info(hba);
5297 ufshcd_print_tmrs(hba, hba->outstanding_tasks);
5298 ufshcd_print_trs(hba, hba->outstanding_reqs,
5299 pr_prdt);
5301 schedule_work(&hba->eh_work);
5305 * if (!queue_eh_work) -
5306 * Other errors are either non-fatal where host recovers
5307 * itself without s/w intervention or errors that will be
5308 * handled by the SCSI core layer.
5313 * ufshcd_tmc_handler - handle task management function completion
5314 * @hba: per adapter instance
5316 static void ufshcd_tmc_handler(struct ufs_hba *hba)
5318 u32 tm_doorbell;
5320 tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
5321 hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
5322 wake_up(&hba->tm_wq);
5326 * ufshcd_sl_intr - Interrupt service routine
5327 * @hba: per adapter instance
5328 * @intr_status: contains interrupts generated by the controller
5330 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
5332 hba->errors = UFSHCD_ERROR_MASK & intr_status;
5333 if (hba->errors)
5334 ufshcd_check_errors(hba);
5336 if (intr_status & UFSHCD_UIC_MASK)
5337 ufshcd_uic_cmd_compl(hba, intr_status);
5339 if (intr_status & UTP_TASK_REQ_COMPL)
5340 ufshcd_tmc_handler(hba);
5342 if (intr_status & UTP_TRANSFER_REQ_COMPL)
5343 ufshcd_transfer_req_compl(hba);
5347 * ufshcd_intr - Main interrupt service routine
5348 * @irq: irq number
5349 * @__hba: pointer to adapter instance
5351 * Returns IRQ_HANDLED - If interrupt is valid
5352 * IRQ_NONE - If invalid interrupt
5354 static irqreturn_t ufshcd_intr(int irq, void *__hba)
5356 u32 intr_status, enabled_intr_status;
5357 irqreturn_t retval = IRQ_NONE;
5358 struct ufs_hba *hba = __hba;
5360 spin_lock(hba->host->host_lock);
5361 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
5362 enabled_intr_status =
5363 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
5365 if (intr_status)
5366 ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
5368 if (enabled_intr_status) {
5369 ufshcd_sl_intr(hba, enabled_intr_status);
5370 retval = IRQ_HANDLED;
5372 spin_unlock(hba->host->host_lock);
5373 return retval;
5376 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
5378 int err = 0;
5379 u32 mask = 1 << tag;
5380 unsigned long flags;
5382 if (!test_bit(tag, &hba->outstanding_tasks))
5383 goto out;
5385 spin_lock_irqsave(hba->host->host_lock, flags);
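/*
 * The list-clear register takes an inverted mask: writing 0 to a bit
 * position asks the controller to clear that UTMRL slot, hence the
 * ~(1 << tag) below.
 */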
5386 ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
5387 spin_unlock_irqrestore(hba->host->host_lock, flags);
5389 /* poll for max. 1 sec to clear door bell register by h/w */
5390 err = ufshcd_wait_for_register(hba,
5391 REG_UTP_TASK_REQ_DOOR_BELL,
5392 mask, 0, 1000, 1000, true);
5393 out:
5394 return err;
5398 * ufshcd_issue_tm_cmd - issues task management commands to controller
5399 * @hba: per adapter instance
5400 * @lun_id: LUN ID to which TM command is sent
5401 * @task_id: task ID to which the TM command is applicable
5402 * @tm_function: task management function opcode
5403 * @tm_response: task management service response return value
5405 * Returns non-zero value on error, zero on success.
5407 static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
5408 u8 tm_function, u8 *tm_response)
5410 struct utp_task_req_desc *task_req_descp;
5411 struct utp_upiu_task_req *task_req_upiup;
5412 struct Scsi_Host *host;
5413 unsigned long flags;
5414 int free_slot;
5415 int err;
5416 int task_tag;
5418 host = hba->host;
5421 * Get free slot, sleep if slots are unavailable.
5422 * Even though we use wait_event() which sleeps indefinitely,
5423 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
5425 wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
5426 ufshcd_hold(hba, false);
5428 spin_lock_irqsave(host->host_lock, flags);
5429 task_req_descp = hba->utmrdl_base_addr;
5430 task_req_descp += free_slot;
5432 /* Configure task request descriptor */
5433 task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
5434 task_req_descp->header.dword_2 =
5435 cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
5437 /* Configure task request UPIU */
5438 task_req_upiup =
5439 (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
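/*
 * Offset TM tags by nutrs so they can never collide with the tags used
 * for transfer requests.
 */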
5440 task_tag = hba->nutrs + free_slot;
5441 task_req_upiup->header.dword_0 =
5442 UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
5443 lun_id, task_tag);
5444 task_req_upiup->header.dword_1 =
5445 UPIU_HEADER_DWORD(0, tm_function, 0, 0);
5447 * The host shall provide the same value for LUN field in the basic
5448 * header and for Input Parameter.
5450 task_req_upiup->input_param1 = cpu_to_be32(lun_id);
5451 task_req_upiup->input_param2 = cpu_to_be32(task_id);
5453 ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
5455 /* send command to the controller */
5456 __set_bit(free_slot, &hba->outstanding_tasks);
5458 /* Make sure descriptors are ready before ringing the task doorbell */
5459 wmb();
5461 ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
5462 /* Make sure that doorbell is committed immediately */
5463 wmb();
5465 spin_unlock_irqrestore(host->host_lock, flags);
5467 /* wait until the task management command is completed */
5468 err = wait_event_timeout(hba->tm_wq,
5469 test_bit(free_slot, &hba->tm_condition),
5470 msecs_to_jiffies(TM_CMD_TIMEOUT));
5471 if (!err) {
5472 dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
5473 __func__, tm_function);
5474 if (ufshcd_clear_tm_cmd(hba, free_slot))
5475 dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
5476 __func__, free_slot);
5477 err = -ETIMEDOUT;
5478 } else {
5479 err = ufshcd_task_req_compl(hba, free_slot, tm_response);
5482 clear_bit(free_slot, &hba->tm_condition);
5483 ufshcd_put_tm_slot(hba, free_slot);
5484 wake_up(&hba->tm_tag_wq);
5486 ufshcd_release(hba);
5487 return err;
5491 * ufshcd_eh_device_reset_handler - device reset handler registered to
5492 * scsi layer.
5493 * @cmd: SCSI command pointer
5495 * Returns SUCCESS/FAILED
5497 static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
5499 struct Scsi_Host *host;
5500 struct ufs_hba *hba;
5501 unsigned int tag;
5502 u32 pos;
5503 int err;
5504 u8 resp = 0xF;
5505 struct ufshcd_lrb *lrbp;
5506 unsigned long flags;
5508 host = cmd->device->host;
5509 hba = shost_priv(host);
5510 tag = cmd->request->tag;
5512 lrbp = &hba->lrb[tag];
5513 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
5514 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5515 if (!err)
5516 err = resp;
5517 goto out;
5520 /* clear the commands that were pending for corresponding LUN */
5521 for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
5522 if (hba->lrb[pos].lun == lrbp->lun) {
5523 err = ufshcd_clear_cmd(hba, pos);
5524 if (err)
5525 break;
5528 spin_lock_irqsave(host->host_lock, flags);
5529 ufshcd_transfer_req_compl(hba);
5530 spin_unlock_irqrestore(host->host_lock, flags);
5532 out:
5533 hba->req_abort_count = 0;
5534 if (!err) {
5535 err = SUCCESS;
5536 } else {
5537 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5538 err = FAILED;
5540 return err;
5543 static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
5545 struct ufshcd_lrb *lrbp;
5546 int tag;
5548 for_each_set_bit(tag, &bitmap, hba->nutrs) {
5549 lrbp = &hba->lrb[tag];
5550 lrbp->req_abort_skip = true;
5555 * ufshcd_abort - abort a specific command
5556 * @cmd: SCSI command pointer
5558 * Abort the pending command in the device by sending a UFS_ABORT_TASK task
5559 * management command, and in the host controller by clearing the door-bell
5560 * register. There can be a race where the controller sends the command to the
5561 * device while the abort is being issued. To avoid that, first issue
5562 * UFS_QUERY_TASK to check if the command is really issued and then try to abort it.
5564 * Returns SUCCESS/FAILED
5566 static int ufshcd_abort(struct scsi_cmnd *cmd)
5568 struct Scsi_Host *host;
5569 struct ufs_hba *hba;
5570 unsigned long flags;
5571 unsigned int tag;
5572 int err = 0;
5573 int poll_cnt;
5574 u8 resp = 0xF;
5575 struct ufshcd_lrb *lrbp;
5576 u32 reg;
5578 host = cmd->device->host;
5579 hba = shost_priv(host);
5580 tag = cmd->request->tag;
5581 lrbp = &hba->lrb[tag];
5582 if (!ufshcd_valid_tag(hba, tag)) {
5583 dev_err(hba->dev,
5584 "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
5585 __func__, tag, cmd, cmd->request);
5586 BUG();
5590 * Task abort to the device W-LUN is illegal. When this command
5591 * fails, due to spec violation, the next SCSI error handling step
5592 * will be to send a LU reset which, again, is a spec violation.
5593 * To avoid these unnecessary/illegal steps we skip to the last error
5594 * handling stage: reset and restore.
5596 if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
5597 return ufshcd_eh_host_reset_handler(cmd);
5599 ufshcd_hold(hba, false);
5600 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5601 /* If command is already aborted/completed, return SUCCESS */
5602 if (!(test_bit(tag, &hba->outstanding_reqs))) {
5603 dev_err(hba->dev,
5604 "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
5605 __func__, tag, hba->outstanding_reqs, reg);
5606 goto out;
5609 if (!(reg & (1 << tag))) {
5610 dev_err(hba->dev,
5611 "%s: cmd was completed, but without a notifying intr, tag = %d",
5612 __func__, tag);
5615 /* Print Transfer Request of aborted task */
5616 dev_err(hba->dev, "%s: Device abort task at tag %d\n", __func__, tag);
5619 * Print detailed info about aborted request.
5620 * As more than one request might get aborted at the same time,
5621 * print full information only for the first aborted request in order
5622 * to reduce repeated printouts. For other aborted requests only print
5623 * basic details.
5625 scsi_print_command(hba->lrb[tag].cmd);
5626 if (!hba->req_abort_count) {
5627 ufshcd_print_host_regs(hba);
5628 ufshcd_print_host_state(hba);
5629 ufshcd_print_pwr_info(hba);
5630 ufshcd_print_trs(hba, 1 << tag, true);
5631 } else {
5632 ufshcd_print_trs(hba, 1 << tag, false);
5634 hba->req_abort_count++;
5636 /* Skip task abort in case previous aborts failed and report failure */
5637 if (lrbp->req_abort_skip) {
5638 err = -EIO;
5639 goto out;
5642 for (poll_cnt = 100; poll_cnt; poll_cnt--) {
5643 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5644 UFS_QUERY_TASK, &resp);
5645 if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
5646 /* cmd pending in the device */
5647 dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
5648 __func__, tag);
5649 break;
5650 } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5652 * cmd not pending in the device, check if it is
5653 * in transition.
5655 dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
5656 __func__, tag);
5657 reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
5658 if (reg & (1 << tag)) {
5659 /* sleep for max. 200us to stabilize */
5660 usleep_range(100, 200);
5661 continue;
5663 /* command completed already */
5664 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
5665 __func__, tag);
5666 goto out;
5667 } else {
5668 dev_err(hba->dev,
5669 "%s: no response from device. tag = %d, err %d\n",
5670 __func__, tag, err);
5671 if (!err)
5672 err = resp; /* service response error */
5673 goto out;
5677 if (!poll_cnt) {
5678 err = -EBUSY;
5679 goto out;
5682 err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
5683 UFS_ABORT_TASK, &resp);
5684 if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
5685 if (!err) {
5686 err = resp; /* service response error */
5687 dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
5688 __func__, tag, err);
5690 goto out;
5693 err = ufshcd_clear_cmd(hba, tag);
5694 if (err) {
5695 dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
5696 __func__, tag, err);
5697 goto out;
5700 scsi_dma_unmap(cmd);
5702 spin_lock_irqsave(host->host_lock, flags);
5703 ufshcd_outstanding_req_clear(hba, tag);
5704 hba->lrb[tag].cmd = NULL;
5705 spin_unlock_irqrestore(host->host_lock, flags);
5707 clear_bit_unlock(tag, &hba->lrb_in_use);
5708 wake_up(&hba->dev_cmd.tag_wq);
5710 out:
5711 if (!err) {
5712 err = SUCCESS;
5713 } else {
5714 dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
5715 ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
5716 err = FAILED;
5720 * This ufshcd_release() corresponds to the original scsi cmd that got
5721 * aborted here (as we won't get any IRQ for it).
5723 ufshcd_release(hba);
5724 return err;
5728 * ufshcd_host_reset_and_restore - reset and restore host controller
5729 * @hba: per-adapter instance
5731 * Note that host controller reset may issue DME_RESET to
5732 * local and remote (device) Uni-Pro stack and the attributes
5733 * are reset to default state.
5735 * Returns zero on success, non-zero on failure
5737 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
5739 int err;
5740 unsigned long flags;
5742 /* Reset the host controller */
5743 spin_lock_irqsave(hba->host->host_lock, flags);
5744 ufshcd_hba_stop(hba, false);
5745 spin_unlock_irqrestore(hba->host->host_lock, flags);
5747 /* scale up clocks to max frequency before full reinitialization */
5748 ufshcd_scale_clks(hba, true);
5750 err = ufshcd_hba_enable(hba);
5751 if (err)
5752 goto out;
5754 /* Establish the link again and restore the device */
5755 err = ufshcd_probe_hba(hba);
5757 if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
5758 err = -EIO;
5759 out:
5760 if (err)
5761 dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
5763 return err;
5767 * ufshcd_reset_and_restore - reset and re-initialize host/device
5768 * @hba: per-adapter instance
5770 * Reset and recover device, host and re-establish link. This
5771 * is helpful to recover the communication in fatal error conditions.
5773 * Returns zero on success, non-zero on failure
5775 static int ufshcd_reset_and_restore(struct ufs_hba *hba)
5777 int err = 0;
5778 unsigned long flags;
5779 int retries = MAX_HOST_RESET_RETRIES;
5781 do {
5782 err = ufshcd_host_reset_and_restore(hba);
5783 } while (err && --retries);
5786 * After reset the door-bell might be cleared, complete
5787 * outstanding requests in s/w here.
5789 spin_lock_irqsave(hba->host->host_lock, flags);
5790 ufshcd_transfer_req_compl(hba);
5791 ufshcd_tmc_handler(hba);
5792 spin_unlock_irqrestore(hba->host->host_lock, flags);
5794 return err;
5798 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
5799 * @cmd: SCSI command pointer
5801 * Returns SUCCESS/FAILED
5803 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
5805 int err;
5806 unsigned long flags;
5807 struct ufs_hba *hba;
5809 hba = shost_priv(cmd->device->host);
5811 ufshcd_hold(hba, false);
5813 * Check if there is any race with fatal error handling.
5814 * If so, wait for it to complete. Even though fatal error
5815 * handling does reset and restore in some cases, don't assume
5816 * anything out of it. We are just avoiding race here.
5818 do {
5819 spin_lock_irqsave(hba->host->host_lock, flags);
5820 if (!(work_pending(&hba->eh_work) ||
5821 hba->ufshcd_state == UFSHCD_STATE_RESET ||
5822 hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
5823 break;
5824 spin_unlock_irqrestore(hba->host->host_lock, flags);
5825 dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
5826 flush_work(&hba->eh_work);
5827 } while (1);
5829 hba->ufshcd_state = UFSHCD_STATE_RESET;
5830 ufshcd_set_eh_in_progress(hba);
5831 spin_unlock_irqrestore(hba->host->host_lock, flags);
5833 err = ufshcd_reset_and_restore(hba);
5835 spin_lock_irqsave(hba->host->host_lock, flags);
5836 if (!err) {
5837 err = SUCCESS;
5838 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
5839 } else {
5840 err = FAILED;
5841 hba->ufshcd_state = UFSHCD_STATE_ERROR;
5843 ufshcd_clear_eh_in_progress(hba);
5844 spin_unlock_irqrestore(hba->host->host_lock, flags);
5846 ufshcd_release(hba);
5847 return err;
5851 * ufshcd_get_max_icc_level - calculate the ICC level
5852 * @sup_curr_uA: max. current supported by the regulator
5853 * @start_scan: row at the desc table to start scan from
5854 * @buff: power descriptor buffer
5856 * Returns calculated max ICC level for specific regulator
5858 static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
5860 int i;
5861 int curr_uA;
5862 u16 data;
5863 u16 unit;
5865 for (i = start_scan; i >= 0; i--) {
5866 data = be16_to_cpup((__be16 *)&buff[2 * i]);
5867 unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
5868 ATTR_ICC_LVL_UNIT_OFFSET;
5869 curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
5870 switch (unit) {
5871 case UFSHCD_NANO_AMP:
5872 curr_uA = curr_uA / 1000;
5873 break;
5874 case UFSHCD_MILI_AMP:
5875 curr_uA = curr_uA * 1000;
5876 break;
5877 case UFSHCD_AMP:
5878 curr_uA = curr_uA * 1000 * 1000;
5879 break;
5880 case UFSHCD_MICRO_AMP:
5881 default:
5882 break;
5884 if (sup_curr_uA >= curr_uA)
5885 break;
5887 if (i < 0) {
5888 i = 0;
5889 pr_err("%s: Couldn't find valid icc_level = %d\n", __func__, i);
5892 return (u32)i;
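/*
 * Worked example for the unit normalization above (values are purely
 * illustrative): a descriptor row that decodes to unit == UFSHCD_MILI_AMP
 * with value 150 is scaled to 150 * 1000 = 150000 uA. With
 * sup_curr_uA == 200000, the "sup_curr_uA >= curr_uA" check passes, the
 * scan stops at that row and its index is returned as the ICC level.
 */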
5896 * ufshcd_find_max_sup_active_icc_level - calculate the max supported active ICC level
5897 * In case regulators are not initialized we'll return 0
5898 * @hba: per-adapter instance
5899 * @desc_buf: power descriptor buffer to extract ICC levels from.
5900 * @len: length of desc_buf
5902 * Returns calculated ICC level
5904 static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
5905 u8 *desc_buf, int len)
5907 u32 icc_level = 0;
5909 if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
5910 !hba->vreg_info.vccq2) {
5911 dev_err(hba->dev,
5912 "%s: Regulator capability was not set, actvIccLevel=%d",
5913 __func__, icc_level);
5914 goto out;
5917 if (hba->vreg_info.vcc)
5918 icc_level = ufshcd_get_max_icc_level(
5919 hba->vreg_info.vcc->max_uA,
5920 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
5921 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
5923 if (hba->vreg_info.vccq)
5924 icc_level = ufshcd_get_max_icc_level(
5925 hba->vreg_info.vccq->max_uA,
5926 icc_level,
5927 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
5929 if (hba->vreg_info.vccq2)
5930 icc_level = ufshcd_get_max_icc_level(
5931 hba->vreg_info.vccq2->max_uA,
5932 icc_level,
5933 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
5934 out:
5935 return icc_level;
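/*
 * Note on the chaining above: each call passes the previously computed
 * icc_level as start_scan, so every later scan only considers rows that
 * already fit the earlier rails. E.g. (illustrative numbers) if VCC
 * limits the level to 10 and VCCQ then lowers it to 7, the VCCQ2 scan
 * starts from row 7, so the final level satisfies all three regulators.
 */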
5938 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
5940 int ret;
5941 int buff_len = hba->desc_size.pwr_desc;
5942 u8 desc_buf[hba->desc_size.pwr_desc];
5944 ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
5945 if (ret) {
5946 dev_err(hba->dev,
5947 "%s: Failed reading power descriptor.len = %d ret = %d",
5948 __func__, buff_len, ret);
5949 return;
5952 hba->init_prefetch_data.icc_level =
5953 ufshcd_find_max_sup_active_icc_level(hba,
5954 desc_buf, buff_len);
5955 dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
5956 __func__, hba->init_prefetch_data.icc_level);
5958 ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
5959 QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
5960 &hba->init_prefetch_data.icc_level);
5962 if (ret)
5963 dev_err(hba->dev,
5964 "%s: Failed configuring bActiveICCLevel = %d ret = %d",
5965 __func__, hba->init_prefetch_data.icc_level, ret);
5970 * ufshcd_scsi_add_wlus - Adds required W-LUs
5971 * @hba: per-adapter instance
5973 * UFS device specification requires the UFS devices to support 4 well known
5974 * logical units:
5975 * "REPORT_LUNS" (address: 01h)
5976 * "UFS Device" (address: 50h)
5977 * "RPMB" (address: 44h)
5978 * "BOOT" (address: 30h)
5979 * The UFS device's power management needs to be controlled by the "POWER
5980 * CONDITION" field of the SSU (START STOP UNIT) command. But this "power
5981 * condition" field will take effect only when it is sent to the "UFS
5982 * device" well known logical unit, hence we require a scsi_device
5983 * instance to represent this logical unit in order for the UFS host
5984 * driver to send the SSU command for power management.
5985 * We also require the scsi_device instance for the "RPMB" (Replay
5986 * Protected Memory Block) LU so that a user space process can control
5987 * this LU. User space may also want to have access to the BOOT LU.
5989 * This function adds scsi device instances for each of the well known
5990 * LUs (except the "REPORT LUNS" LU).
5992 * Returns zero on success (all required W-LUs are added successfully),
5993 * non-zero error value on failure (if failed to add any of the required W-LU).
5995 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
5997 int ret = 0;
5998 struct scsi_device *sdev_rpmb;
5999 struct scsi_device *sdev_boot;
6001 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
6002 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
6003 if (IS_ERR(hba->sdev_ufs_device)) {
6004 ret = PTR_ERR(hba->sdev_ufs_device);
6005 hba->sdev_ufs_device = NULL;
6006 goto out;
6008 scsi_device_put(hba->sdev_ufs_device);
6010 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
6011 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
6012 if (IS_ERR(sdev_rpmb)) {
6013 ret = PTR_ERR(sdev_rpmb);
6014 goto remove_sdev_ufs_device;
6016 scsi_device_put(sdev_rpmb);
6018 sdev_boot = __scsi_add_device(hba->host, 0, 0,
6019 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
6020 if (IS_ERR(sdev_boot))
6021 dev_err(hba->dev, "%s: BOOT WLUN not found\n", __func__);
6022 else
6023 scsi_device_put(sdev_boot);
6024 goto out;
6026 remove_sdev_ufs_device:
6027 scsi_remove_device(hba->sdev_ufs_device);
6028 out:
6029 return ret;
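/*
 * Sketch of the W-LUN mapping used above, assuming the usual encoding in
 * ufshcd_upiu_wlun_to_scsi_wlun() (the UPIU W-LUN indicator bit replaced
 * by the SCSI well known LUN base 0xC100): e.g. UFS_UPIU_UFS_DEVICE_WLUN
 * (0xD0, i.e. address 50h with the W-LUN bit set) would map to SCSI
 * LUN 0xC150.
 */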
6032 static int ufs_get_device_desc(struct ufs_hba *hba,
6033 struct ufs_dev_desc *dev_desc)
6035 int err;
6036 u8 model_index;
6037 u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
6038 u8 desc_buf[hba->desc_size.dev_desc];
6040 err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
6041 if (err) {
6042 dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
6043 __func__, err);
6044 goto out;
6048 * getting vendor (manufacturerID) and Bank Index in big endian
6049 * format
6051 dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
6052 desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
6054 model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
6056 err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
6057 QUERY_DESC_MAX_SIZE, ASCII_STD);
6058 if (err) {
6059 dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
6060 __func__, err);
6061 goto out;
6064 str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
6065 strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
6066 min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
6067 MAX_MODEL_LEN));
6069 /* Null terminate the model string */
6070 dev_desc->model[MAX_MODEL_LEN] = '\0';
6072 out:
6073 return err;
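/*
 * The open-coded shift/OR above is just a big-endian 16-bit load; it
 * could equivalently be written (assuming <asm/unaligned.h> is pulled in
 * via the existing headers) as:
 *
 *	dev_desc->wmanufacturerid =
 *		get_unaligned_be16(&desc_buf[DEVICE_DESC_PARAM_MANF_ID]);
 */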
6076 static void ufs_fixup_device_setup(struct ufs_hba *hba,
6077 struct ufs_dev_desc *dev_desc)
6079 struct ufs_dev_fix *f;
6081 for (f = ufs_fixups; f->quirk; f++) {
6082 if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid ||
6083 f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
6084 (STR_PRFX_EQUAL(f->card.model, dev_desc->model) ||
6085 !strcmp(f->card.model, UFS_ANY_MODEL)))
6086 hba->dev_quirks |= f->quirk;
6091 * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
6092 * @hba: per-adapter instance
6094 * PA_TActivate parameter can be tuned manually if UniPro version is less than
6095 * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
6096 * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
6097 * the hibern8 exit latency.
6099 * Returns zero on success, non-zero error value on failure.
6101 static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
6103 int ret = 0;
6104 u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
6106 ret = ufshcd_dme_peer_get(hba,
6107 UIC_ARG_MIB_SEL(
6108 RX_MIN_ACTIVATETIME_CAPABILITY,
6109 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6110 &peer_rx_min_activatetime);
6111 if (ret)
6112 goto out;
6114 /* make sure proper unit conversion is applied */
6115 tuned_pa_tactivate =
6116 ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
6117 / PA_TACTIVATE_TIME_UNIT_US);
6118 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6119 tuned_pa_tactivate);
6121 out:
6122 return ret;
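/*
 * Worked example for the conversion above, assuming the unit macros are
 * RX_MIN_ACTIVATETIME_UNIT_US == 100 and PA_TACTIVATE_TIME_UNIT_US == 10
 * (see their definitions in the UniPro headers): a peer capability value
 * of 5 means 5 * 100 = 500 us, which is programmed into PA_TACTIVATE as
 * 500 / 10 = 50.
 */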
6126 * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
6127 * @hba: per-adapter instance
6129 * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
6130 * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
6131 * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
6132 * This optimal value can help reduce the hibern8 exit latency.
6134 * Returns zero on success, non-zero error value on failure.
6136 static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
6138 int ret = 0;
6139 u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
6140 u32 max_hibern8_time, tuned_pa_hibern8time;
6142 ret = ufshcd_dme_get(hba,
6143 UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
6144 UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
6145 &local_tx_hibern8_time_cap);
6146 if (ret)
6147 goto out;
6149 ret = ufshcd_dme_peer_get(hba,
6150 UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
6151 UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
6152 &peer_rx_hibern8_time_cap);
6153 if (ret)
6154 goto out;
6156 max_hibern8_time = max(local_tx_hibern8_time_cap,
6157 peer_rx_hibern8_time_cap);
6158 /* make sure proper unit conversion is applied */
6159 tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
6160 / PA_HIBERN8_TIME_UNIT_US);
6161 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
6162 tuned_pa_hibern8time);
6163 out:
6164 return ret;
6168 * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
6169 * less than device PA_TACTIVATE time.
6170 * @hba: per-adapter instance
6172 * Some UFS devices require host PA_TACTIVATE to be lower than device
6173 * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
6174 * enabled for such devices.
6176 * Returns zero on success, non-zero error value on failure.
6178 static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
6180 int ret = 0;
6181 u32 granularity, peer_granularity;
6182 u32 pa_tactivate, peer_pa_tactivate;
6183 u32 pa_tactivate_us, peer_pa_tactivate_us;
6184 u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
6186 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6187 &granularity);
6188 if (ret)
6189 goto out;
6191 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
6192 &peer_granularity);
6193 if (ret)
6194 goto out;
6196 if ((granularity < PA_GRANULARITY_MIN_VAL) ||
6197 (granularity > PA_GRANULARITY_MAX_VAL)) {
6198 dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
6199 __func__, granularity);
6200 return -EINVAL;
6203 if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
6204 (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
6205 dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
6206 __func__, peer_granularity);
6207 return -EINVAL;
6210 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
6211 if (ret)
6212 goto out;
6214 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
6215 &peer_pa_tactivate);
6216 if (ret)
6217 goto out;
6219 pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
6220 peer_pa_tactivate_us = peer_pa_tactivate *
6221 gran_to_us_table[peer_granularity - 1];
6223 if (pa_tactivate_us > peer_pa_tactivate_us) {
6224 u32 new_peer_pa_tactivate;
6226 new_peer_pa_tactivate = pa_tactivate_us /
6227 gran_to_us_table[peer_granularity - 1];
6228 new_peer_pa_tactivate++;
6229 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
6230 new_peer_pa_tactivate);
6233 out:
6234 return ret;
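/*
 * Worked example for the granularity math above: with host
 * granularity == 3 (8 us per step, per gran_to_us_table[]) and host
 * PA_TACTIVATE == 3, the host needs 3 * 8 = 24 us. With peer
 * granularity == 2 (4 us per step) and peer PA_TACTIVATE == 2, the peer
 * only advertises 8 us. Since 24 > 8, the peer is reprogrammed to
 * 24 / 4 + 1 == 7, i.e. 28 us, safely above the host's 24 us.
 */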
6237 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
6239 if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
6240 ufshcd_tune_pa_tactivate(hba);
6241 ufshcd_tune_pa_hibern8time(hba);
6244 if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
6245 /* set 1ms timeout for PA_TACTIVATE */
6246 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
6248 if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
6249 ufshcd_quirk_tune_host_pa_tactivate(hba);
6251 ufshcd_vops_apply_dev_quirks(hba);
6254 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
6256 int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
6258 hba->ufs_stats.hibern8_exit_cnt = 0;
6259 hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
6261 memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
6262 memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
6263 memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
6264 memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
6265 memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
6267 hba->req_abort_count = 0;
6270 static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
6272 int err;
6274 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
6275 &hba->desc_size.dev_desc);
6276 if (err)
6277 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6279 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
6280 &hba->desc_size.pwr_desc);
6281 if (err)
6282 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6284 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
6285 &hba->desc_size.interc_desc);
6286 if (err)
6287 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6289 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
6290 &hba->desc_size.conf_desc);
6291 if (err)
6292 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6294 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
6295 &hba->desc_size.unit_desc);
6296 if (err)
6297 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6299 err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
6300 &hba->desc_size.geom_desc);
6301 if (err)
6302 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6305 static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
6307 hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
6308 hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
6309 hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
6310 hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
6311 hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
6312 hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
6316 * ufshcd_probe_hba - probe hba to detect device and initialize
6317 * @hba: per-adapter instance
6319 * Execute link-startup and verify device initialization
6321 static int ufshcd_probe_hba(struct ufs_hba *hba)
6323 struct ufs_dev_desc card = {0};
6324 int ret;
6325 ktime_t start = ktime_get();
6327 ret = ufshcd_link_startup(hba);
6328 if (ret)
6329 goto out;
6331 /* set the default level for urgent bkops */
6332 hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
6333 hba->is_urgent_bkops_lvl_checked = false;
6335 /* Debug counters initialization */
6336 ufshcd_clear_dbg_ufs_stats(hba);
6338 /* UniPro link is active now */
6339 ufshcd_set_link_active(hba);
6341 ret = ufshcd_verify_dev_init(hba);
6342 if (ret)
6343 goto out;
6345 ret = ufshcd_complete_dev_init(hba);
6346 if (ret)
6347 goto out;
6349 /* Init check for device descriptor sizes */
6350 ufshcd_init_desc_sizes(hba);
6352 ret = ufs_get_device_desc(hba, &card);
6353 if (ret) {
6354 dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
6355 __func__, ret);
6356 goto out;
6359 ufs_fixup_device_setup(hba, &card);
6360 ufshcd_tune_unipro_params(hba);
6362 ret = ufshcd_set_vccq_rail_unused(hba,
6363 (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
6364 if (ret)
6365 goto out;
6367 /* UFS device is also active now */
6368 ufshcd_set_ufs_dev_active(hba);
6369 ufshcd_force_reset_auto_bkops(hba);
6370 hba->wlun_dev_clr_ua = true;
6372 if (ufshcd_get_max_pwr_mode(hba)) {
6373 dev_err(hba->dev,
6374 "%s: Failed getting max supported power mode\n",
6375 __func__);
6376 } else {
6377 ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
6378 if (ret) {
6379 dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
6380 __func__, ret);
6381 goto out;
6385 /* set the state as operational after switching to desired gear */
6386 hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
6389 * If we are in error handling context or in power management callbacks
6390 * context, no need to scan the host
6392 if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6393 bool flag;
6395 /* clear any previous UFS device information */
6396 memset(&hba->dev_info, 0, sizeof(hba->dev_info));
6397 if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
6398 QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
6399 hba->dev_info.f_power_on_wp_en = flag;
6401 if (!hba->is_init_prefetch)
6402 ufshcd_init_icc_levels(hba);
6404 /* Add required well known logical units to scsi mid layer */
6405 if (ufshcd_scsi_add_wlus(hba))
6406 goto out;
6408 /* Initialize devfreq after UFS device is detected */
6409 if (ufshcd_is_clkscaling_supported(hba)) {
6410 memcpy(&hba->clk_scaling.saved_pwr_info.info,
6411 &hba->pwr_info,
6412 sizeof(struct ufs_pa_layer_attr));
6413 hba->clk_scaling.saved_pwr_info.is_valid = true;
6414 if (!hba->devfreq) {
6415 hba->devfreq = devm_devfreq_add_device(hba->dev,
6416 &ufs_devfreq_profile,
6417 "simple_ondemand",
6418 NULL);
6419 if (IS_ERR(hba->devfreq)) {
6420 ret = PTR_ERR(hba->devfreq);
6421 dev_err(hba->dev, "Unable to register with devfreq %d\n",
6422 ret);
6423 goto out;
6426 hba->clk_scaling.is_allowed = true;
6429 scsi_scan_host(hba->host);
6430 pm_runtime_put_sync(hba->dev);
6433 if (!hba->is_init_prefetch)
6434 hba->is_init_prefetch = true;
6436 out:
6438 * If we failed to initialize the device or the device is not
6439 * present, turn off the power/clocks etc.
6441 if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
6442 pm_runtime_put_sync(hba->dev);
6443 ufshcd_hba_exit(hba);
6446 trace_ufshcd_init(dev_name(hba->dev), ret,
6447 ktime_to_us(ktime_sub(ktime_get(), start)),
6448 hba->curr_dev_pwr_mode, hba->uic_link_state);
6449 return ret;
6453 * ufshcd_async_scan - asynchronous execution for probing hba
6454 * @data: data pointer to pass to this function
6455 * @cookie: cookie data
6457 static void ufshcd_async_scan(void *data, async_cookie_t cookie)
6459 struct ufs_hba *hba = (struct ufs_hba *)data;
6461 ufshcd_probe_hba(hba);
6464 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
6466 unsigned long flags;
6467 struct Scsi_Host *host;
6468 struct ufs_hba *hba;
6469 int index;
6470 bool found = false;
6472 if (!scmd || !scmd->device || !scmd->device->host)
6473 return BLK_EH_NOT_HANDLED;
6475 host = scmd->device->host;
6476 hba = shost_priv(host);
6477 if (!hba)
6478 return BLK_EH_NOT_HANDLED;
6480 spin_lock_irqsave(host->host_lock, flags);
6482 for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
6483 if (hba->lrb[index].cmd == scmd) {
6484 found = true;
6485 break;
6489 spin_unlock_irqrestore(host->host_lock, flags);
6492 * Bypass SCSI error handling and reset the block layer timer if this
6493 * SCSI command was not actually dispatched to UFS driver, otherwise
6494 * let SCSI layer handle the error as usual.
6496 return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
6499 static struct scsi_host_template ufshcd_driver_template = {
6500 .module = THIS_MODULE,
6501 .name = UFSHCD,
6502 .proc_name = UFSHCD,
6503 .queuecommand = ufshcd_queuecommand,
6504 .slave_alloc = ufshcd_slave_alloc,
6505 .slave_configure = ufshcd_slave_configure,
6506 .slave_destroy = ufshcd_slave_destroy,
6507 .change_queue_depth = ufshcd_change_queue_depth,
6508 .eh_abort_handler = ufshcd_abort,
6509 .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
6510 .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
6511 .eh_timed_out = ufshcd_eh_timed_out,
6512 .this_id = -1,
6513 .sg_tablesize = SG_ALL,
6514 .cmd_per_lun = UFSHCD_CMD_PER_LUN,
6515 .can_queue = UFSHCD_CAN_QUEUE,
6516 .max_host_blocked = 1,
6517 .track_queue_depth = 1,
6520 static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
6521 int ua)
6523 int ret;
6525 if (!vreg)
6526 return 0;
6528 ret = regulator_set_load(vreg->reg, ua);
6529 if (ret < 0) {
6530 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
6531 __func__, vreg->name, ua, ret);
6534 return ret;
6537 static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
6538 struct ufs_vreg *vreg)
6540 if (!vreg)
6541 return 0;
6542 else if (vreg->unused)
6543 return 0;
6544 else
6545 return ufshcd_config_vreg_load(hba->dev, vreg,
6546 UFS_VREG_LPM_LOAD_UA);
6549 static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
6550 struct ufs_vreg *vreg)
6552 if (!vreg)
6553 return 0;
6554 else if (vreg->unused)
6555 return 0;
6556 else
6557 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
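/*
 * A note on the two helpers above: regulator_set_load() advertises the
 * expected load in uA so the regulator framework can pick a suitable
 * operating mode. In LPM the fixed UFS_VREG_LPM_LOAD_UA budget is
 * advertised; in HPM the rail's full max_uA is restored. E.g.
 * (illustrative figure) a rail with max_uA == 300000 is dropped to the
 * LPM budget on suspend and raised back to 300000 uA on resume.
 */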
6560 static int ufshcd_config_vreg(struct device *dev,
6561 struct ufs_vreg *vreg, bool on)
6563 int ret = 0;
6564 struct regulator *reg;
6565 const char *name;
6566 int min_uV, uA_load;
6568 BUG_ON(!vreg);
6570 reg = vreg->reg;
6571 name = vreg->name;
6573 if (regulator_count_voltages(reg) > 0) {
6574 min_uV = on ? vreg->min_uV : 0;
6575 ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6576 if (ret) {
6577 dev_err(dev, "%s: %s set voltage failed, err=%d\n",
6578 __func__, name, ret);
6579 goto out;
6582 uA_load = on ? vreg->max_uA : 0;
6583 ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
6584 if (ret)
6585 goto out;
6587 out:
6588 return ret;
6591 static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
6593 int ret = 0;
6595 if (!vreg)
6596 goto out;
6597 else if (vreg->enabled || vreg->unused)
6598 goto out;
6600 ret = ufshcd_config_vreg(dev, vreg, true);
6601 if (!ret)
6602 ret = regulator_enable(vreg->reg);
6604 if (!ret)
6605 vreg->enabled = true;
6606 else
6607 dev_err(dev, "%s: %s enable failed, err=%d\n",
6608 __func__, vreg->name, ret);
6609 out:
6610 return ret;
6613 static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
6615 int ret = 0;
6617 if (!vreg)
6618 goto out;
6619 else if (!vreg->enabled || vreg->unused)
6620 goto out;
6622 ret = regulator_disable(vreg->reg);
6624 if (!ret) {
6625 /* ignore errors on applying disable config */
6626 ufshcd_config_vreg(dev, vreg, false);
6627 vreg->enabled = false;
6628 } else {
6629 dev_err(dev, "%s: %s disable failed, err=%d\n",
6630 __func__, vreg->name, ret);
6632 out:
6633 return ret;
6636 static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
6638 int ret = 0;
6639 struct device *dev = hba->dev;
6640 struct ufs_vreg_info *info = &hba->vreg_info;
6642 if (!info)
6643 goto out;
6645 ret = ufshcd_toggle_vreg(dev, info->vcc, on);
6646 if (ret)
6647 goto out;
6649 ret = ufshcd_toggle_vreg(dev, info->vccq, on);
6650 if (ret)
6651 goto out;
6653 ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
6654 if (ret)
6655 goto out;
6657 out:
6658 if (ret) {
6659 ufshcd_toggle_vreg(dev, info->vccq2, false);
6660 ufshcd_toggle_vreg(dev, info->vccq, false);
6661 ufshcd_toggle_vreg(dev, info->vcc, false);
6663 return ret;
6666 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
6668 struct ufs_vreg_info *info = &hba->vreg_info;
6670 if (info)
6671 return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
6673 return 0;
6676 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
6678 int ret = 0;
6680 if (!vreg)
6681 goto out;
6683 vreg->reg = devm_regulator_get(dev, vreg->name);
6684 if (IS_ERR(vreg->reg)) {
6685 ret = PTR_ERR(vreg->reg);
6686 dev_err(dev, "%s: %s get failed, err=%d\n",
6687 __func__, vreg->name, ret);
6689 out:
6690 return ret;
6693 static int ufshcd_init_vreg(struct ufs_hba *hba)
6695 int ret = 0;
6696 struct device *dev = hba->dev;
6697 struct ufs_vreg_info *info = &hba->vreg_info;
6699 if (!info)
6700 goto out;
6702 ret = ufshcd_get_vreg(dev, info->vcc);
6703 if (ret)
6704 goto out;
6706 ret = ufshcd_get_vreg(dev, info->vccq);
6707 if (ret)
6708 goto out;
6710 ret = ufshcd_get_vreg(dev, info->vccq2);
6711 out:
6712 return ret;
6715 static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
6717 struct ufs_vreg_info *info = &hba->vreg_info;
6719 if (info)
6720 return ufshcd_get_vreg(hba->dev, info->vdd_hba);
6722 return 0;
6725 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
6727 int ret = 0;
6728 struct ufs_vreg_info *info = &hba->vreg_info;
6730 if (!info)
6731 goto out;
6732 else if (!info->vccq)
6733 goto out;
6735 if (unused) {
6736 /* shut off the rail here */
6737 ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
6739 * Mark this rail as no longer used, so it doesn't get enabled
6740 * later by mistake
6742 if (!ret)
6743 info->vccq->unused = true;
6744 } else {
6746 * The rail should already have been enabled, so just make sure
6747 * that the unused flag is cleared.
6749 info->vccq->unused = false;
6751 out:
6752 return ret;
6755 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
6756 bool skip_ref_clk)
6758 int ret = 0;
6759 struct ufs_clk_info *clki;
6760 struct list_head *head = &hba->clk_list_head;
6761 unsigned long flags;
6762 ktime_t start = ktime_get();
6763 bool clk_state_changed = false;
6765 if (list_empty(head))
6766 goto out;
6768 ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE);
6769 if (ret)
6770 return ret;
6772 list_for_each_entry(clki, head, list) {
6773 if (!IS_ERR_OR_NULL(clki->clk)) {
6774 if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
6775 continue;
6777 clk_state_changed = on ^ clki->enabled;
6778 if (on && !clki->enabled) {
6779 ret = clk_prepare_enable(clki->clk);
6780 if (ret) {
6781 dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
6782 __func__, clki->name, ret);
6783 goto out;
6785 } else if (!on && clki->enabled) {
6786 clk_disable_unprepare(clki->clk);
6788 clki->enabled = on;
6789 dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
6790 clki->name, on ? "en" : "dis");
6794 ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE);
6795 if (ret)
6796 return ret;
6798 out:
6799 if (ret) {
6800 list_for_each_entry(clki, head, list) {
6801 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
6802 clk_disable_unprepare(clki->clk);
6804 } else if (!ret && on) {
6805 spin_lock_irqsave(hba->host->host_lock, flags);
6806 hba->clk_gating.state = CLKS_ON;
6807 trace_ufshcd_clk_gating(dev_name(hba->dev),
6808 hba->clk_gating.state);
6809 spin_unlock_irqrestore(hba->host->host_lock, flags);
6812 if (clk_state_changed)
6813 trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
6814 (on ? "on" : "off"),
6815 ktime_to_us(ktime_sub(ktime_get(), start)), ret);
6816 return ret;
6819 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
6821 return __ufshcd_setup_clocks(hba, on, false);
6824 static int ufshcd_init_clocks(struct ufs_hba *hba)
6826 int ret = 0;
6827 struct ufs_clk_info *clki;
6828 struct device *dev = hba->dev;
6829 struct list_head *head = &hba->clk_list_head;
6831 if (list_empty(head))
6832 goto out;
6834 list_for_each_entry(clki, head, list) {
6835 if (!clki->name)
6836 continue;
6838 clki->clk = devm_clk_get(dev, clki->name);
6839 if (IS_ERR(clki->clk)) {
6840 ret = PTR_ERR(clki->clk);
6841 dev_err(dev, "%s: %s clk get failed, %d\n",
6842 __func__, clki->name, ret);
6843 goto out;
6846 if (clki->max_freq) {
6847 ret = clk_set_rate(clki->clk, clki->max_freq);
6848 if (ret) {
6849 dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
6850 __func__, clki->name,
6851 clki->max_freq, ret);
6852 goto out;
6854 clki->curr_freq = clki->max_freq;
6856 dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
6857 clki->name, clk_get_rate(clki->clk));
6859 out:
6860 return ret;
6863 static int ufshcd_variant_hba_init(struct ufs_hba *hba)
6865 int err = 0;
6867 if (!hba->vops)
6868 goto out;
6870 err = ufshcd_vops_init(hba);
6871 if (err)
6872 goto out;
6874 err = ufshcd_vops_setup_regulators(hba, true);
6875 if (err)
6876 goto out_exit;
6878 goto out;
6880 out_exit:
6881 ufshcd_vops_exit(hba);
6882 out:
6883 if (err)
6884 dev_err(hba->dev, "%s: variant %s init failed err %d\n",
6885 __func__, ufshcd_get_var_name(hba), err);
6886 return err;
6889 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
6891 if (!hba->vops)
6892 return;
6894 ufshcd_vops_setup_regulators(hba, false);
6896 ufshcd_vops_exit(hba);
6899 static int ufshcd_hba_init(struct ufs_hba *hba)
6901 int err;
6904 * Handle host controller power separately from the UFS device power
6905 * rails, as this makes it easier to control host controller power
6906 * collapse, which is different from UFS device power collapse.
6907 * Also, enable the host controller power before going ahead with the
6908 * rest of the initialization here.
6910 err = ufshcd_init_hba_vreg(hba);
6911 if (err)
6912 goto out;
6914 err = ufshcd_setup_hba_vreg(hba, true);
6915 if (err)
6916 goto out;
6918 err = ufshcd_init_clocks(hba);
6919 if (err)
6920 goto out_disable_hba_vreg;
6922 err = ufshcd_setup_clocks(hba, true);
6923 if (err)
6924 goto out_disable_hba_vreg;
6926 err = ufshcd_init_vreg(hba);
6927 if (err)
6928 goto out_disable_clks;
6930 err = ufshcd_setup_vreg(hba, true);
6931 if (err)
6932 goto out_disable_clks;
6934 err = ufshcd_variant_hba_init(hba);
6935 if (err)
6936 goto out_disable_vreg;
6938 hba->is_powered = true;
6939 goto out;
6941 out_disable_vreg:
6942 ufshcd_setup_vreg(hba, false);
6943 out_disable_clks:
6944 ufshcd_setup_clocks(hba, false);
6945 out_disable_hba_vreg:
6946 ufshcd_setup_hba_vreg(hba, false);
6947 out:
6948 return err;
6951 static void ufshcd_hba_exit(struct ufs_hba *hba)
6953 if (hba->is_powered) {
6954 ufshcd_variant_hba_exit(hba);
6955 ufshcd_setup_vreg(hba, false);
6956 ufshcd_suspend_clkscaling(hba);
6957 if (ufshcd_is_clkscaling_supported(hba)) {
6958 if (hba->devfreq)
6959 ufshcd_suspend_clkscaling(hba);
6960 destroy_workqueue(hba->clk_scaling.workq);
6962 ufshcd_setup_clocks(hba, false);
6963 ufshcd_setup_hba_vreg(hba, false);
6964 hba->is_powered = false;
6968 static int
6969 ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
6971 unsigned char cmd[6] = {REQUEST_SENSE,
6972 0,
6973 0,
6974 0,
6975 UFSHCD_REQ_SENSE_SIZE,
6976 0};
6977 char *buffer;
6978 int ret;
6980 buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
6981 if (!buffer) {
6982 ret = -ENOMEM;
6983 goto out;
6986 ret = scsi_execute(sdp, cmd, DMA_FROM_DEVICE, buffer,
6987 UFSHCD_REQ_SENSE_SIZE, NULL, NULL,
6988 msecs_to_jiffies(1000), 3, 0, RQF_PM, NULL);
6989 if (ret)
6990 pr_err("%s: failed with err %d\n", __func__, ret);
6992 kfree(buffer);
6993 out:
6994 return ret;
6998 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
6999 * power mode
7000 * @hba: per adapter instance
7001 * @pwr_mode: device power mode to set
7003 * Returns 0 if requested power mode is set successfully
7004 * Returns non-zero if failed to set the requested power mode
7006 static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
7007 enum ufs_dev_pwr_mode pwr_mode)
7009 unsigned char cmd[6] = { START_STOP };
7010 struct scsi_sense_hdr sshdr;
7011 struct scsi_device *sdp;
7012 unsigned long flags;
7013 int ret;
7015 spin_lock_irqsave(hba->host->host_lock, flags);
7016 sdp = hba->sdev_ufs_device;
7017 if (sdp) {
7018 ret = scsi_device_get(sdp);
7019 if (!ret && !scsi_device_online(sdp)) {
7020 ret = -ENODEV;
7021 scsi_device_put(sdp);
7023 } else {
7024 ret = -ENODEV;
7026 spin_unlock_irqrestore(hba->host->host_lock, flags);
7028 if (ret)
7029 return ret;
7032 * If scsi commands fail, the scsi mid-layer schedules scsi error-
7033 * handling, which would wait for host to be resumed. Since we know
7034 * we are functional while we are here, skip host resume in error
7035 * handling context.
7037 hba->host->eh_noresume = 1;
7038 if (hba->wlun_dev_clr_ua) {
7039 ret = ufshcd_send_request_sense(hba, sdp);
7040 if (ret)
7041 goto out;
7042 /* Unit attention condition is cleared now */
7043 hba->wlun_dev_clr_ua = false;
7046 cmd[4] = pwr_mode << 4;
7049 * Current function would be generally called from the power management
7050 * callbacks hence set the RQF_PM flag so that it doesn't resume the
7051 * already suspended children.
7053 ret = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
7054 START_STOP_TIMEOUT, 0, 0, RQF_PM, NULL);
7055 if (ret) {
7056 sdev_printk(KERN_WARNING, sdp,
7057 "START_STOP failed for power mode: %d, result %x\n",
7058 pwr_mode, ret);
7059 if (driver_byte(ret) & DRIVER_SENSE)
7060 scsi_print_sense_hdr(sdp, NULL, &sshdr);
7063 if (!ret)
7064 hba->curr_dev_pwr_mode = pwr_mode;
7065 out:
7066 scsi_device_put(sdp);
7067 hba->host->eh_noresume = 0;
7068 return ret;
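/*
 * Worked example for the CDB built above: the POWER CONDITION field
 * occupies the upper nibble of byte 4 of START STOP UNIT, hence
 * "cmd[4] = pwr_mode << 4". Assuming UFS_SLEEP_PWR_MODE == 2 (see
 * enum ufs_dev_pwr_mode), a sleep request yields cmd[4] == 0x20.
 */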
7071 static int ufshcd_link_state_transition(struct ufs_hba *hba,
7072 enum uic_link_state req_link_state,
7073 int check_for_bkops)
7075 int ret = 0;
7077 if (req_link_state == hba->uic_link_state)
7078 return 0;
7080 if (req_link_state == UIC_LINK_HIBERN8_STATE) {
7081 ret = ufshcd_uic_hibern8_enter(hba);
7082 if (!ret)
7083 ufshcd_set_link_hibern8(hba);
7084 else
7085 goto out;
7088 * If autobkops is enabled, link can't be turned off because
7089 * turning off the link would also turn off the device.
7091 else if ((req_link_state == UIC_LINK_OFF_STATE) &&
7092 (!check_for_bkops || (check_for_bkops &&
7093 !hba->auto_bkops_enabled))) {
7095 * Let's make sure that the link is in low power mode; we currently do
7096 * this by putting the link in Hibern8. Another way to put the link in
7097 * low power mode is to send a DME end point reset to the device and
7098 * then send the DME reset command to the local UniPro. But putting the
7099 * link in Hibern8 is much faster.
7101 ret = ufshcd_uic_hibern8_enter(hba);
7102 if (ret)
7103 goto out;
7105 * Change controller state to "reset state" which
7106 * should also put the link in off/reset state
7108 ufshcd_hba_stop(hba, true);
7110 * TODO: Check if we need any delay to make sure that
7111 * controller is reset
7113 ufshcd_set_link_off(hba);
7116 out:
7117 return ret;
7120 static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
7123 * It seems some UFS devices may keep drawing more than the sleep current
7124 * (at least for 500us) from the UFS rails (especially from the VCCQ rail).
7125 * To avoid this situation, add a 2ms delay before putting these UFS
7126 * rails in LPM mode.
7128 if (!ufshcd_is_link_active(hba) &&
7129 hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
7130 usleep_range(2000, 2100);
7133 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
7134 * save some power.
7136 * If the UFS device and link are in OFF state, all power supplies (VCC,
7137 * VCCQ, VCCQ2) can be turned off if power on write protect is not
7138 * required. If the UFS link is inactive (Hibern8 or OFF state) and the
7139 * device is in sleep state, put the VCCQ & VCCQ2 rails in LPM mode.
7141 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
7142 * anyway going into a low power state, which saves some power.
7144 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7145 !hba->dev_info.is_lu_power_on_wp) {
7146 ufshcd_setup_vreg(hba, false);
7147 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7148 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7149 if (!ufshcd_is_link_active(hba)) {
7150 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7151 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
7156 static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
7158 int ret = 0;
7160 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
7161 !hba->dev_info.is_lu_power_on_wp) {
7162 ret = ufshcd_setup_vreg(hba, true);
7163 } else if (!ufshcd_is_ufs_dev_active(hba)) {
7164 if (!ret && !ufshcd_is_link_active(hba)) {
7165 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
7166 if (ret)
7167 goto vcc_disable;
7168 ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
7169 if (ret)
7170 goto vccq_lpm;
7172 ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
7174 goto out;
7176 vccq_lpm:
7177 ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
7178 vcc_disable:
7179 ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
7180 out:
7181 return ret;
7184 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
7186 if (ufshcd_is_link_off(hba))
7187 ufshcd_setup_hba_vreg(hba, false);
7190 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
7192 if (ufshcd_is_link_off(hba))
7193 ufshcd_setup_hba_vreg(hba, true);
7197 * ufshcd_suspend - helper function for suspend operations
7198 * @hba: per adapter instance
7199 * @pm_op: desired low power operation type
7201 * This function will try to put the UFS device and link into low power
7202 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
7203 * (System PM level).
7205 * If this function is called during shutdown, it will make sure that
7206 * both the UFS device and the UFS link are powered off.
7208 * NOTE: UFS device & link must be active before we enter in this function.
7210 * Returns 0 for success and non-zero for failure
7212 static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7214 int ret = 0;
7215 enum ufs_pm_level pm_lvl;
7216 enum ufs_dev_pwr_mode req_dev_pwr_mode;
7217 enum uic_link_state req_link_state;
7219 hba->pm_op_in_progress = 1;
7220 if (!ufshcd_is_shutdown_pm(pm_op)) {
7221 pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
7222 hba->rpm_lvl : hba->spm_lvl;
7223 req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
7224 req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
7225 } else {
7226 req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
7227 req_link_state = UIC_LINK_OFF_STATE;
7231 * If we can't transition into any of the low power modes
7232 * just gate the clocks.
7234 ufshcd_hold(hba, false);
7235 hba->clk_gating.is_suspended = true;
7237 if (hba->clk_scaling.is_allowed) {
7238 cancel_work_sync(&hba->clk_scaling.suspend_work);
7239 cancel_work_sync(&hba->clk_scaling.resume_work);
7240 ufshcd_suspend_clkscaling(hba);
7243 if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
7244 req_link_state == UIC_LINK_ACTIVE_STATE) {
7245 goto disable_clks;
7248 if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
7249 (req_link_state == hba->uic_link_state))
7250 goto enable_gating;
7252 /* UFS device & link must be active before we enter in this function */
7253 if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
7254 ret = -EINVAL;
7255 goto enable_gating;
7258 if (ufshcd_is_runtime_pm(pm_op)) {
7259 if (ufshcd_can_autobkops_during_suspend(hba)) {
7261 * The device is idle with no requests in the queue,
7262 * allow background operations if bkops status shows
7263 * that performance might be impacted.
7265 ret = ufshcd_urgent_bkops(hba);
7266 if (ret)
7267 goto enable_gating;
7268 } else {
7269 /* make sure that auto bkops is disabled */
7270 ufshcd_disable_auto_bkops(hba);
7274 if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
7275 ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
7276 !ufshcd_is_runtime_pm(pm_op))) {
7277 /* ensure that bkops is disabled */
7278 ufshcd_disable_auto_bkops(hba);
7279 ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
7280 if (ret)
7281 goto enable_gating;
7284 ret = ufshcd_link_state_transition(hba, req_link_state, 1);
7285 if (ret)
7286 goto set_dev_active;
7288 ufshcd_vreg_set_lpm(hba);
7290 disable_clks:
7292 * Call vendor specific suspend callback. As these callbacks may access
7293 * vendor specific host controller register space call them before the
7294 * host clocks are ON.
7296 ret = ufshcd_vops_suspend(hba, pm_op);
7297 if (ret)
7298 goto set_link_active;
7300 if (!ufshcd_is_link_active(hba))
7301 ufshcd_setup_clocks(hba, false);
7302 else
7303 /* If link is active, device ref_clk can't be switched off */
7304 __ufshcd_setup_clocks(hba, false, true);
7306 hba->clk_gating.state = CLKS_OFF;
7307 trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
7309 * Disable the host irq as there won't be any host controller
7310 * transaction expected till resume.
7312 ufshcd_disable_irq(hba);
7313 /* Put the host controller in low power mode if possible */
7314 ufshcd_hba_vreg_set_lpm(hba);
7315 goto out;
7317 set_link_active:
7318 if (hba->clk_scaling.is_allowed)
7319 ufshcd_resume_clkscaling(hba);
7320 ufshcd_vreg_set_hpm(hba);
7321 if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
7322 ufshcd_set_link_active(hba);
7323 else if (ufshcd_is_link_off(hba))
7324 ufshcd_host_reset_and_restore(hba);
7325 set_dev_active:
7326 if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
7327 ufshcd_disable_auto_bkops(hba);
7328 enable_gating:
7329 if (hba->clk_scaling.is_allowed)
7330 ufshcd_resume_clkscaling(hba);
7331 hba->clk_gating.is_suspended = false;
7332 ufshcd_release(hba);
7333 out:
7334 hba->pm_op_in_progress = 0;
7335 return ret;
7339 * ufshcd_resume - helper function for resume operations
7340 * @hba: per adapter instance
7341 * @pm_op: runtime PM or system PM
7343 * This function basically brings the UFS device, UniPro link and controller
7344 * to active state.
7346 * Returns 0 for success and non-zero for failure
7348 static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
7350 int ret;
7351 enum uic_link_state old_link_state;
7353 hba->pm_op_in_progress = 1;
7354 old_link_state = hba->uic_link_state;
7356 ufshcd_hba_vreg_set_hpm(hba);
7357 /* Make sure clocks are enabled before accessing controller */
7358 ret = ufshcd_setup_clocks(hba, true);
7359 if (ret)
7360 goto out;
7362 /* enable the host irq as host controller would be active soon */
7363 ret = ufshcd_enable_irq(hba);
7364 if (ret)
7365 goto disable_irq_and_vops_clks;
7367 ret = ufshcd_vreg_set_hpm(hba);
7368 if (ret)
7369 goto disable_irq_and_vops_clks;
7372 * Call vendor specific resume callback. As these callbacks may access
7373 * vendor specific host controller register space call them when the
7374 * host clocks are ON.
7376 ret = ufshcd_vops_resume(hba, pm_op);
7377 if (ret)
7378 goto disable_vreg;
7380 if (ufshcd_is_link_hibern8(hba)) {
7381 ret = ufshcd_uic_hibern8_exit(hba);
7382 if (!ret)
7383 ufshcd_set_link_active(hba);
7384 else
7385 goto vendor_suspend;
7386 } else if (ufshcd_is_link_off(hba)) {
7387 ret = ufshcd_host_reset_and_restore(hba);
7389 * ufshcd_host_reset_and_restore() should have already
7390 * set the link state as active
7392 if (ret || !ufshcd_is_link_active(hba))
7393 goto vendor_suspend;
7396 if (!ufshcd_is_ufs_dev_active(hba)) {
7397 ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
7398 if (ret)
7399 goto set_old_link_state;
7402 if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
7403 ufshcd_enable_auto_bkops(hba);
7404 else
7406 * If BKOPs operations are urgently needed at this moment then
7407 * keep auto-bkops enabled or else disable it.
7409 ufshcd_urgent_bkops(hba);
7411 hba->clk_gating.is_suspended = false;
7413 if (hba->clk_scaling.is_allowed)
7414 ufshcd_resume_clkscaling(hba);
7416 /* Schedule clock gating in case of no access to UFS device yet */
7417 ufshcd_release(hba);
7418 goto out;
7420 set_old_link_state:
7421 ufshcd_link_state_transition(hba, old_link_state, 0);
7422 vendor_suspend:
7423 ufshcd_vops_suspend(hba, pm_op);
7424 disable_vreg:
7425 ufshcd_vreg_set_lpm(hba);
7426 disable_irq_and_vops_clks:
7427 ufshcd_disable_irq(hba);
7428 if (hba->clk_scaling.is_allowed)
7429 ufshcd_suspend_clkscaling(hba);
7430 ufshcd_setup_clocks(hba, false);
7431 out:
7432 hba->pm_op_in_progress = 0;
7433 return ret;
7437 * ufshcd_system_suspend - system suspend routine
7438 * @hba: per adapter instance
7439 * @pm_op: runtime PM or system PM
7441 * Check the description of ufshcd_suspend() function for more details.
7443 * Returns 0 for success and non-zero for failure
7445 int ufshcd_system_suspend(struct ufs_hba *hba)
7447 int ret = 0;
7448 ktime_t start = ktime_get();
7450 if (!hba || !hba->is_powered)
7451 return 0;
7453 if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
7454 hba->curr_dev_pwr_mode) &&
7455 (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
7456 hba->uic_link_state))
7457 goto out;
7459 if (pm_runtime_suspended(hba->dev)) {
7461 * UFS device and/or UFS link low power states during runtime
7462 * suspend seem to be different than what is expected during
7463 * system suspend. Hence runtime resume the device & link and
7464 * let the system suspend low power states take effect.
7465 * TODO: If resume takes a long time, we might optimize it in the
7466 * future by not resuming everything if possible.
7468 ret = ufshcd_runtime_resume(hba);
7469 if (ret)
7470 goto out;
7473 ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
7474 out:
7475 trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
7476 ktime_to_us(ktime_sub(ktime_get(), start)),
7477 hba->curr_dev_pwr_mode, hba->uic_link_state);
7478 if (!ret)
7479 hba->is_sys_suspended = true;
7480 return ret;
7482 EXPORT_SYMBOL(ufshcd_system_suspend);
7485 * ufshcd_system_resume - system resume routine
7486 * @hba: per adapter instance
7488 * Returns 0 for success and non-zero for failure
7491 int ufshcd_system_resume(struct ufs_hba *hba)
7493 int ret = 0;
7494 ktime_t start = ktime_get();
7496 if (!hba)
7497 return -EINVAL;
7499 if (!hba->is_powered || pm_runtime_suspended(hba->dev))
7501 * Let the runtime resume take care of resuming
7502 * if runtime suspended.
7504 goto out;
7505 else
7506 ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
7507 out:
7508 trace_ufshcd_system_resume(dev_name(hba->dev), ret,
7509 ktime_to_us(ktime_sub(ktime_get(), start)),
7510 hba->curr_dev_pwr_mode, hba->uic_link_state);
7511 return ret;
7513 EXPORT_SYMBOL(ufshcd_system_resume);
7516 * ufshcd_runtime_suspend - runtime suspend routine
7517 * @hba: per adapter instance
7519 * Check the description of ufshcd_suspend() function for more details.
7521 * Returns 0 for success and non-zero for failure
7523 int ufshcd_runtime_suspend(struct ufs_hba *hba)
7525 int ret = 0;
7526 ktime_t start = ktime_get();
7528 if (!hba)
7529 return -EINVAL;
7531 if (!hba->is_powered)
7532 goto out;
7533 else
7534 ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
7535 out:
7536 trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
7537 ktime_to_us(ktime_sub(ktime_get(), start)),
7538 hba->curr_dev_pwr_mode, hba->uic_link_state);
7539 return ret;
7541 EXPORT_SYMBOL(ufshcd_runtime_suspend);
7544 * ufshcd_runtime_resume - runtime resume routine
7545 * @hba: per adapter instance
7547 * This function basically brings the UFS device, UniPro link and controller
7548 * to active state. Following operations are done in this function:
7550 * 1. Turn on all the controller related clocks
7551 * 2. Bring the UniPro link out of Hibernate state
7552 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
7553 * to active state.
7554 * 4. If auto-bkops is enabled on the device, disable it.
7556 * So the following would be the possible power state after this
7557 * function returns successfully:
7558 * S1: UFS device in Active state with VCC rail ON
7559 * UniPro link in Active state
7560 * All the UFS/UniPro controller clocks are ON
7562 * Returns 0 for success and non-zero for failure
7564 int ufshcd_runtime_resume(struct ufs_hba *hba)
7566 int ret = 0;
7567 ktime_t start = ktime_get();
7569 if (!hba)
7570 return -EINVAL;
7572 if (!hba->is_powered)
7573 goto out;
7574 else
7575 ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
7576 out:
7577 trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
7578 ktime_to_us(ktime_sub(ktime_get(), start)),
7579 hba->curr_dev_pwr_mode, hba->uic_link_state);
7580 return ret;
7582 EXPORT_SYMBOL(ufshcd_runtime_resume);
7584 int ufshcd_runtime_idle(struct ufs_hba *hba)
7586 return 0;
7588 EXPORT_SYMBOL(ufshcd_runtime_idle);
7590 static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
7591 struct device_attribute *attr,
7592 const char *buf, size_t count,
7593 bool rpm)
7595 struct ufs_hba *hba = dev_get_drvdata(dev);
7596 unsigned long flags, value;
7598 if (kstrtoul(buf, 0, &value))
7599 return -EINVAL;
7601 if (value >= UFS_PM_LVL_MAX)
7602 return -EINVAL;
7604 spin_lock_irqsave(hba->host->host_lock, flags);
7605 if (rpm)
7606 hba->rpm_lvl = value;
7607 else
7608 hba->spm_lvl = value;
7609 spin_unlock_irqrestore(hba->host->host_lock, flags);
7610 return count;
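/*
 * Usage sketch for the PM level attributes registered below (the exact
 * sysfs path is illustrative and depends on how the host device is
 * named):
 *
 *	# echo 3 > /sys/devices/.../rpm_lvl
 *
 * would select UFS_PM_LVL_3 for runtime PM; values >= UFS_PM_LVL_MAX are
 * rejected with -EINVAL by the store handler above.
 */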
7613 static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
7614 struct device_attribute *attr, char *buf)
7616 struct ufs_hba *hba = dev_get_drvdata(dev);
7617 int curr_len;
7618 u8 lvl;
7620 curr_len = snprintf(buf, PAGE_SIZE,
7621 "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
7622 hba->rpm_lvl,
7623 ufschd_ufs_dev_pwr_mode_to_string(
7624 ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
7625 ufschd_uic_link_state_to_string(
7626 ufs_pm_lvl_states[hba->rpm_lvl].link_state));
7628 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7629 "\nAll available Runtime PM levels info:\n");
7630 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
7631 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7632 "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
7633 lvl,
7634 ufschd_ufs_dev_pwr_mode_to_string(
7635 ufs_pm_lvl_states[lvl].dev_state),
7636 ufschd_uic_link_state_to_string(
7637 ufs_pm_lvl_states[lvl].link_state));
7639 return curr_len;
7642 static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
7643 struct device_attribute *attr, const char *buf, size_t count)
7645 return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
7648 static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
7650 hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
7651 hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
7652 sysfs_attr_init(&hba->rpm_lvl_attr.attr);
7653 hba->rpm_lvl_attr.attr.name = "rpm_lvl";
7654 hba->rpm_lvl_attr.attr.mode = 0644;
7655 if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
7656 dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
7659 static ssize_t ufshcd_spm_lvl_show(struct device *dev,
7660 struct device_attribute *attr, char *buf)
7662 struct ufs_hba *hba = dev_get_drvdata(dev);
7663 int curr_len;
7664 u8 lvl;
7666 curr_len = snprintf(buf, PAGE_SIZE,
7667 "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
7668 hba->spm_lvl,
7669 ufschd_ufs_dev_pwr_mode_to_string(
7670 ufs_pm_lvl_states[hba->spm_lvl].dev_state),
7671 ufschd_uic_link_state_to_string(
7672 ufs_pm_lvl_states[hba->spm_lvl].link_state));
7674 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7675 "\nAll available System PM levels info:\n");
7676 for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
7677 curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
7678 "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
7679 lvl,
7680 ufschd_ufs_dev_pwr_mode_to_string(
7681 ufs_pm_lvl_states[lvl].dev_state),
7682 ufschd_uic_link_state_to_string(
7683 ufs_pm_lvl_states[lvl].link_state));
7685 return curr_len;
7688 static ssize_t ufshcd_spm_lvl_store(struct device *dev,
7689 struct device_attribute *attr, const char *buf, size_t count)
7691 return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
7694 static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
7696 hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
7697 hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
7698 sysfs_attr_init(&hba->spm_lvl_attr.attr);
7699 hba->spm_lvl_attr.attr.name = "spm_lvl";
7700 hba->spm_lvl_attr.attr.mode = 0644;
7701 if (device_create_file(hba->dev, &hba->spm_lvl_attr))
7702 dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
7705 static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
7707 ufshcd_add_rpm_lvl_sysfs_nodes(hba);
7708 ufshcd_add_spm_lvl_sysfs_nodes(hba);
7711 static inline void ufshcd_remove_sysfs_nodes(struct ufs_hba *hba)
7713 device_remove_file(hba->dev, &hba->rpm_lvl_attr);
7714 device_remove_file(hba->dev, &hba->spm_lvl_attr);
7718 * ufshcd_shutdown - shutdown routine
7719 * @hba: per adapter instance
7721 * This function would power off both UFS device and UFS link.
7723 * Returns 0 always to allow force shutdown even in case of errors.
7725 int ufshcd_shutdown(struct ufs_hba *hba)
7727 int ret = 0;
7729 if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
7730 goto out;
7732 if (pm_runtime_suspended(hba->dev)) {
7733 ret = ufshcd_runtime_resume(hba);
7734 if (ret)
7735 goto out;
7738 ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
7739 out:
7740 if (ret)
7741 dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
7742 /* allow force shutdown even in case of errors */
7743 return 0;
7745 EXPORT_SYMBOL(ufshcd_shutdown);
7748 * ufshcd_remove - de-allocate the SCSI host and the host memory space
7749 * data structures
7750 * @hba - per adapter instance
7752 void ufshcd_remove(struct ufs_hba *hba)
7754 ufshcd_remove_sysfs_nodes(hba);
7755 scsi_remove_host(hba->host);
7756 /* disable interrupts */
7757 ufshcd_disable_intr(hba, hba->intr_mask);
7758 ufshcd_hba_stop(hba, true);
7760 ufshcd_exit_clk_gating(hba);
7761 if (ufshcd_is_clkscaling_supported(hba))
7762 device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
7763 ufshcd_hba_exit(hba);
7765 EXPORT_SYMBOL_GPL(ufshcd_remove);
7768 * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
7769 * @hba: pointer to Host Bus Adapter (HBA)
7771 void ufshcd_dealloc_host(struct ufs_hba *hba)
7773 scsi_host_put(hba->host);
7775 EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
7778 * ufshcd_set_dma_mask - Set dma mask based on the controller
7779 * addressing capability
7780 * @hba: per adapter instance
7782 * Returns 0 for success, non-zero for failure
7784 static int ufshcd_set_dma_mask(struct ufs_hba *hba)
7786 if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
7787 if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
7788 return 0;
7790 return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
7794 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
7795 * @dev: pointer to device handle
7796 * @hba_handle: driver private handle
7797 * Returns 0 on success, non-zero value on failure
7799 int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7801 struct Scsi_Host *host;
7802 struct ufs_hba *hba;
7803 int err = 0;
7805 if (!dev) {
7806 dev_err(dev,
7807 "Invalid memory reference for dev is NULL\n");
7808 err = -ENODEV;
7809 goto out_error;
7812 host = scsi_host_alloc(&ufshcd_driver_template,
7813 sizeof(struct ufs_hba));
7814 if (!host) {
7815 dev_err(dev, "scsi_host_alloc failed\n");
7816 err = -ENOMEM;
7817 goto out_error;
7819 hba = shost_priv(host);
7820 hba->host = host;
7821 hba->dev = dev;
7822 *hba_handle = hba;
7824 INIT_LIST_HEAD(&hba->clk_list_head);
7826 out_error:
7827 return err;
7829 EXPORT_SYMBOL(ufshcd_alloc_host);
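/*
 * Illustrative call sequence for a bus glue driver (a sketch, not part
 * of this file; "pdev", "mmio_base" and "irq" are placeholders supplied
 * by the caller, and error handling is elided):
 *
 *	struct ufs_hba *hba;
 *	int err;
 *
 *	err = ufshcd_alloc_host(&pdev->dev, &hba);
 *	if (!err)
 *		err = ufshcd_init(hba, mmio_base, irq);
 *	...
 *	ufshcd_remove(hba);
 *	ufshcd_dealloc_host(hba);
 *
 * ufshcd_init() is defined below.
 */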
7832 * ufshcd_init - Driver initialization routine
7833 * @hba: per-adapter instance
7834 * @mmio_base: base register address
7835 * @irq: Interrupt line of device
7836 * Returns 0 on success, non-zero value on failure
7838 int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
7840 int err;
7841 struct Scsi_Host *host = hba->host;
7842 struct device *dev = hba->dev;
7844 if (!mmio_base) {
7845 dev_err(hba->dev,
7846 "Invalid memory reference for mmio_base is NULL\n");
7847 err = -ENODEV;
7848 goto out_error;
7851 hba->mmio_base = mmio_base;
7852 hba->irq = irq;
7854 /* Set descriptor lengths to specification defaults */
7855 ufshcd_def_desc_sizes(hba);
7857 err = ufshcd_hba_init(hba);
7858 if (err)
7859 goto out_error;
7861 /* Read capabilities registers */
7862 ufshcd_hba_capabilities(hba);
7864 /* Get UFS version supported by the controller */
7865 hba->ufs_version = ufshcd_get_ufs_version(hba);
7867 if ((hba->ufs_version != UFSHCI_VERSION_10) &&
7868 (hba->ufs_version != UFSHCI_VERSION_11) &&
7869 (hba->ufs_version != UFSHCI_VERSION_20) &&
7870 (hba->ufs_version != UFSHCI_VERSION_21))
7871 dev_err(hba->dev, "invalid UFS version 0x%x\n",
7872 hba->ufs_version);
7874 /* Get Interrupt bit mask per version */
7875 hba->intr_mask = ufshcd_get_intr_mask(hba);
7877 err = ufshcd_set_dma_mask(hba);
7878 if (err) {
7879 dev_err(hba->dev, "set dma mask failed\n");
7880 goto out_disable;
7883 /* Allocate memory for host memory space */
7884 err = ufshcd_memory_alloc(hba);
7885 if (err) {
7886 dev_err(hba->dev, "Memory allocation failed\n");
7887 goto out_disable;
7890 /* Configure LRB */
7891 ufshcd_host_memory_configure(hba);
7893 host->can_queue = hba->nutrs;
7894 host->cmd_per_lun = hba->nutrs;
7895 host->max_id = UFSHCD_MAX_ID;
7896 host->max_lun = UFS_MAX_LUNS;
7897 host->max_channel = UFSHCD_MAX_CHANNEL;
7898 host->unique_id = host->host_no;
7899 host->max_cmd_len = MAX_CDB_SIZE;
7901 hba->max_pwr_info.is_valid = false;
7903 /* Initialize wait queue for task management */
7904 init_waitqueue_head(&hba->tm_wq);
7905 init_waitqueue_head(&hba->tm_tag_wq);
7907 /* Initialize work queues */
7908 INIT_WORK(&hba->eh_work, ufshcd_err_handler);
7909 INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
7911 /* Initialize UIC command mutex */
7912 mutex_init(&hba->uic_cmd_mutex);
7914 /* Initialize mutex for device management commands */
7915 mutex_init(&hba->dev_cmd.lock);
7917 init_rwsem(&hba->clk_scaling_lock);
7919 /* Initialize device management tag acquire wait queue */
7920 init_waitqueue_head(&hba->dev_cmd.tag_wq);
7922 ufshcd_init_clk_gating(hba);
7925 * In order to avoid any spurious interrupt immediately after
7926 * registering UFS controller interrupt handler, clear any pending UFS
7927 * interrupt status and disable all the UFS interrupts.
7929 ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
7930 REG_INTERRUPT_STATUS);
7931 ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
7933 * Make sure that UFS interrupts are disabled and any pending interrupt
7934 * status is cleared before registering UFS interrupt handler.
7936 mb();
7938 /* IRQ registration */
7939 err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
7940 if (err) {
7941 dev_err(hba->dev, "request irq failed\n");
7942 goto exit_gating;
7943 } else {
7944 hba->is_irq_enabled = true;
7947 err = scsi_add_host(host, hba->dev);
7948 if (err) {
7949 dev_err(hba->dev, "scsi_add_host failed\n");
7950 goto exit_gating;
7953 /* Host controller enable */
7954 err = ufshcd_hba_enable(hba);
7955 if (err) {
7956 dev_err(hba->dev, "Host controller enable failed\n");
7957 ufshcd_print_host_regs(hba);
7958 ufshcd_print_host_state(hba);
7959 goto out_remove_scsi_host;
7962 if (ufshcd_is_clkscaling_supported(hba)) {
7963 char wq_name[sizeof("ufs_clkscaling_00")];
7965 INIT_WORK(&hba->clk_scaling.suspend_work,
7966 ufshcd_clk_scaling_suspend_work);
7967 INIT_WORK(&hba->clk_scaling.resume_work,
7968 ufshcd_clk_scaling_resume_work);
7970 snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
7971 host->host_no);
7972 hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
7974 ufshcd_clkscaling_init_sysfs(hba);
7978 * Set the default power management level for runtime and system PM.
7979 * Default power saving mode is to keep UFS link in Hibern8 state
7980 * and UFS device in sleep state.
7982 hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
7983 UFS_SLEEP_PWR_MODE,
7984 UIC_LINK_HIBERN8_STATE);
7985 hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
7986 UFS_SLEEP_PWR_MODE,
7987 UIC_LINK_HIBERN8_STATE);
7989 /* Hold auto suspend until async scan completes */
7990 pm_runtime_get_sync(dev);
7993 * We are assuming that the device wasn't put into the sleep/power-down
7994 * state during the boot stage, before the kernel took over. This
7995 * assumption helps avoid doing link startup twice during
7996 * ufshcd_probe_hba().
7998 ufshcd_set_ufs_dev_active(hba);
8000 async_schedule(ufshcd_async_scan, hba);
8001 ufshcd_add_sysfs_nodes(hba);
8003 return 0;
8005 out_remove_scsi_host:
8006 scsi_remove_host(hba->host);
8007 exit_gating:
8008 ufshcd_exit_clk_gating(hba);
8009 out_disable:
8010 hba->is_irq_enabled = false;
8011 ufshcd_hba_exit(hba);
8012 out_error:
8013 return err;
8015 EXPORT_SYMBOL_GPL(ufshcd_init);
8017 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
8018 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
8019 MODULE_DESCRIPTION("Generic UFS host controller driver Core");
8020 MODULE_LICENSE("GPL");
8021 MODULE_VERSION(UFSHCD_DRIVER_VERSION);