// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Universal Flash Storage Host controller PCI glue driver
 *
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 */

#include <ufs/ufshcd.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
#include <linux/uuid.h>
#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#define MAX_SUPP_MAC 64

struct ufs_host {
	void (*late_init)(struct ufs_hba *hba);
};
enum intel_ufs_dsm_func_id {
	INTEL_DSM_FNS	= 0,
	INTEL_DSM_RESET	= 1,
};

struct intel_host {
	struct ufs_host ufs_host;
	u32		dsm_fns;
	u32		active_ltr;
	u32		idle_ltr;
	struct dentry	*debugfs_root;
	struct gpio_desc *reset_gpio;
};
static const guid_t intel_dsm_guid =
	GUID_INIT(0x1A4832A0, 0x7D03, 0x43CA,
		  0xB0, 0x20, 0xF6, 0xDC, 0xD1, 0x2A, 0x19, 0x50);
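
/*
 * Intel platforms may expose an ACPI _DSM under the GUID above. Following the
 * usual _DSM convention, function 0 returns a bitmask of implemented
 * functions; intel_dsm_init() caches it in host->dsm_fns, so bit n set means
 * DSM function n (enum intel_ufs_dsm_func_id) is available on this platform.
 */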
static bool __intel_dsm_supported(struct intel_host *host,
				  enum intel_ufs_dsm_func_id fn)
{
	return fn < 32 && fn >= 0 && (host->dsm_fns & (1u << fn));
}

#define INTEL_DSM_SUPPORTED(host, name) \
	__intel_dsm_supported(host, INTEL_DSM_##name)
static int __intel_dsm(struct intel_host *intel_host, struct device *dev,
		       unsigned int fn, u32 *result)
{
	union acpi_object *obj;
	int err = 0;
	size_t len;

	obj = acpi_evaluate_dsm_typed(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL,
				      ACPI_TYPE_BUFFER);
	if (!obj)
		return -EOPNOTSUPP;

	if (obj->buffer.length < 1) {
		err = -EINVAL;
		goto out;
	}

	len = min_t(size_t, obj->buffer.length, 4);

	*result = 0;
	memcpy(result, obj->buffer.pointer, len);
out:
	ACPI_FREE(obj);

	return err;
}
static int intel_dsm(struct intel_host *intel_host, struct device *dev,
		     unsigned int fn, u32 *result)
{
	if (!__intel_dsm_supported(intel_host, fn))
		return -EOPNOTSUPP;

	return __intel_dsm(intel_host, dev, fn, result);
}
static void intel_dsm_init(struct intel_host *intel_host, struct device *dev)
{
	int err;

	err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns);
	dev_dbg(dev, "DSM fns %#x, error %d\n", intel_host->dsm_fns, err);
}
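
/*
 * The DSM query above is best effort: without the Intel UFS _DSM, dsm_fns
 * stays zero (the host structure is zero-allocated), every
 * INTEL_DSM_SUPPORTED() check fails, and the driver falls back to the non-DSM
 * paths such as the GPIO based device reset.
 */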
static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
				       enum ufs_notify_change_status status)
{
	/* Cannot enable ICE until after HC enable */
	if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
		u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

		hce |= CRYPTO_GENERAL_ENABLE;
		ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
	}

	return 0;
}
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
	u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
	u32 lcc_enable = 0;

	ufshcd_dme_get(hba, attr, &lcc_enable);
	if (lcc_enable)
		ufshcd_disable_host_tx_lcc(hba);

	return 0;
}
static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
					 enum ufs_notify_change_status status)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		err = ufs_intel_disable_lcc(hba);
		break;
	case POST_CHANGE:
		break;
	default:
		break;
	}

	return err;
}
static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
	struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
	int ret;

	pwr_info.lane_rx = lanes;
	pwr_info.lane_tx = lanes;
	ret = ufshcd_config_pwr_mode(hba, &pwr_info);
	if (ret)
		dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
			__func__, lanes, ret);
	return ret;
}
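
/*
 * Lake Field (LKF) power mode handling: before a high-speed power mode change
 * the host is forced to a 2-lane configuration if it is not already using two
 * lanes, and after the change the driver waits roughly a millisecond and reads
 * PA_GRANULARITY from the device, presumably to confirm the peer responds in
 * the new mode before proceeding.
 */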
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
				enum ufs_notify_change_status status,
				struct ufs_pa_layer_attr *dev_max_params,
				struct ufs_pa_layer_attr *dev_req_params)
{
	int err = 0;

	switch (status) {
	case PRE_CHANGE:
		if (ufshcd_is_hs_mode(dev_max_params) &&
		    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
			ufs_intel_set_lanes(hba, 2);
		memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
		break;
	case POST_CHANGE:
		if (ufshcd_is_hs_mode(dev_req_params)) {
			u32 peer_granularity;

			usleep_range(1000, 1250);
			err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
						  &peer_granularity);
		}
		break;
	default:
		break;
	}

	return err;
}
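
/*
 * When host and device report the same PA_GRANULARITY, the quirk below
 * programs the peer's PA_TACTIVATE to the host's value plus two granularity
 * units, apparently to give the device a little extra activation margin.
 */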
static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
	u32 granularity, peer_granularity;
	u32 pa_tactivate, peer_pa_tactivate;
	int ret;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
	if (ret)
		goto out;

	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
	if (ret)
		goto out;

	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
	if (ret)
		goto out;

	if (granularity == peer_granularity) {
		u32 new_peer_pa_tactivate = pa_tactivate + 2;

		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
	}
out:
	return ret;
}
#define INTEL_ACTIVELTR		0x804
#define INTEL_IDLELTR		0x808

#define INTEL_LTR_REQ		BIT(15)
#define INTEL_LTR_SCALE_MASK	GENMASK(11, 10)
#define INTEL_LTR_SCALE_1US	(2 << 10)
#define INTEL_LTR_SCALE_32US	(3 << 10)
#define INTEL_LTR_VALUE_MASK	GENMASK(9, 0)
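
/*
 * Layout of the INTEL_ACTIVELTR/INTEL_IDLELTR registers as used below: bit 15
 * is the "requirement" flag, bits 11:10 select the scale (1 us or 32 us) and
 * bits 9:0 hold the value. Worked example for intel_ltr_set(): a requested
 * tolerance of 5000 us does not fit in the 10-bit field, so it is shifted
 * right by 5 (5000 >> 5 = 156) and programmed with the 32 us scale, i.e. an
 * effective tolerance of 156 * 32 = 4992 us.
 */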
static void intel_cache_ltr(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
	host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}
static void intel_ltr_set(struct device *dev, s32 val)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct intel_host *host = ufshcd_get_variant(hba);
	u32 ltr;

	pm_runtime_get_sync(dev);

	/*
	 * Program the latency tolerance (LTR) according to what the PM QoS
	 * layer has asked for, or disable it if we were passed a negative
	 * value or PM_QOS_LATENCY_ANY.
	 */
	ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);

	if (val == PM_QOS_LATENCY_ANY || val < 0) {
		ltr &= ~INTEL_LTR_REQ;
	} else {
		ltr |= INTEL_LTR_REQ;
		ltr &= ~INTEL_LTR_SCALE_MASK;
		ltr &= ~INTEL_LTR_VALUE_MASK;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}
	}

	if (ltr == host->active_ltr)
		goto out;

	writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
	writel(ltr, hba->mmio_base + INTEL_IDLELTR);

	/* Cache the values into the intel_host structure */
	intel_cache_ltr(hba);
out:
	pm_runtime_put(dev);
}
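
/*
 * Hooking dev->power.set_latency_tolerance and calling
 * dev_pm_qos_expose_latency_tolerance() lets the PM QoS core forward latency
 * tolerance requests to intel_ltr_set(). User space can then tune it through
 * the device's pm_qos_latency_tolerance_us attribute, for example:
 *
 *	echo 100 > /sys/devices/.../power/pm_qos_latency_tolerance_us
 *	echo any > /sys/devices/.../power/pm_qos_latency_tolerance_us
 *
 * (illustrative path; "any" maps to PM_QOS_LATENCY_ANY, which intel_ltr_set()
 * treats as "no LTR requirement").
 */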
static void intel_ltr_expose(struct device *dev)
{
	dev->power.set_latency_tolerance = intel_ltr_set;
	dev_pm_qos_expose_latency_tolerance(dev);
}
static void intel_ltr_hide(struct device *dev)
{
	dev_pm_qos_hide_latency_tolerance(dev);
	dev->power.set_latency_tolerance = NULL;
}
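
/*
 * The debugfs nodes below expose the last cached active/idle LTR register
 * values read-only (mode 0444), under a directory named after the device.
 */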
static void intel_add_debugfs(struct ufs_hba *hba)
{
	struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
	struct intel_host *host = ufshcd_get_variant(hba);

	intel_cache_ltr(hba);

	host->debugfs_root = dir;
	debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
	debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}
static void intel_remove_debugfs(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	debugfs_remove_recursive(host->debugfs_root);
}
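
/*
 * Device reset comes in two flavours: platforms that implement DSM function
 * INTEL_DSM_RESET delegate the reset to ACPI, everything else falls back to
 * an optional reset GPIO described in _DSD. The GPIO line is active low, so
 * the logical value 1 below asserts reset for roughly 10 us before releasing
 * it again.
 */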
static int ufs_intel_device_reset(struct ufs_hba *hba)
{
	struct intel_host *host = ufshcd_get_variant(hba);

	if (INTEL_DSM_SUPPORTED(host, RESET)) {
		u32 result = 0;
		int err;

		err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
		if (!err && !result)
			err = -EIO;
		if (err)
			dev_err(hba->dev, "%s: DSM error %d result %u\n",
				__func__, err, result);
		return err;
	}

	if (!host->reset_gpio)
		return -EOPNOTSUPP;

	gpiod_set_value_cansleep(host->reset_gpio, 1);
	usleep_range(10, 15);

	gpiod_set_value_cansleep(host->reset_gpio, 0);
	usleep_range(10, 15);

	return 0;
}
static struct gpio_desc *ufs_intel_get_reset_gpio(struct device *dev)
{
	/* GPIO in _DSD has active low setting */
	return devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
}
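
/*
 * UFSHCD_CAP_DEEPSLEEP is only advertised when a working device reset path
 * exists (DSM reset or a usable reset GPIO), since DeepSleep can only be
 * exited by resetting the UFS device.
 */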
static int ufs_intel_common_init(struct ufs_hba *hba)
{
	struct intel_host *host;

	hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

	host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;
	ufshcd_set_variant(hba, host);
	intel_dsm_init(host, hba->dev);
	if (INTEL_DSM_SUPPORTED(host, RESET)) {
		if (hba->vops->device_reset)
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
	} else {
		if (hba->vops->device_reset)
			host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
		if (IS_ERR(host->reset_gpio)) {
			dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
				__func__, PTR_ERR(host->reset_gpio));
			host->reset_gpio = NULL;
		}
		if (host->reset_gpio) {
			gpiod_set_value_cansleep(host->reset_gpio, 0);
			hba->caps |= UFSHCD_CAP_DEEPSLEEP;
		}
	}
	intel_ltr_expose(hba->dev);
	intel_add_debugfs(hba);
	return 0;
}
static void ufs_intel_common_exit(struct ufs_hba *hba)
{
	intel_remove_debugfs(hba);
	intel_ltr_hide(hba->dev);
}
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (ufshcd_is_link_hibern8(hba)) {
		int ret = ufshcd_uic_hibern8_exit(hba);

		if (!ret) {
			ufshcd_set_link_active(hba);
		} else {
			dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
				__func__, ret);
			/*
			 * Force reset and restore. Any other actions can lead
			 * to an unrecoverable state.
			 */
			ufshcd_set_link_off(hba);
		}
	}

	return 0;
}
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	return ufs_intel_common_init(hba);
}
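
/*
 * UFS_PM_LVL_6 pairs DeepSleep with link-off and UFS_PM_LVL_5 pairs power-down
 * with link-off; both imply a full reset and restore on resume, which is what
 * LKF requires, so the late init picks whichever matches the DEEPSLEEP
 * capability negotiated in ufs_intel_common_init().
 */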
static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
	/* LKF always needs a full reset, so set PM accordingly */
	if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
		hba->spm_lvl = UFS_PM_LVL_6;
		hba->rpm_lvl = UFS_PM_LVL_6;
	} else {
		hba->spm_lvl = UFS_PM_LVL_5;
		hba->rpm_lvl = UFS_PM_LVL_5;
	}
}
static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
	struct ufs_host *ufs_host;
	int err;

	hba->nop_out_timeout = 200;
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	hba->caps |= UFSHCD_CAP_CRYPTO;
	err = ufs_intel_common_init(hba);
	ufs_host = ufshcd_get_variant(hba);
	ufs_host->late_init = ufs_intel_lkf_late_init;
	return err;
}
static int ufs_intel_adl_init(struct ufs_hba *hba)
{
	hba->nop_out_timeout = 200;
	hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
	hba->caps |= UFSHCD_CAP_WB_EN;
	return ufs_intel_common_init(hba);
}
static int ufs_intel_mtl_init(struct ufs_hba *hba)
{
	hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN;
	return ufs_intel_common_init(hba);
}
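
/*
 * The ufs_qemu_* helpers back QEMU's emulated UFS PCI controller (Red Hat
 * vendor ID, device 0x0013 in the table below). get_hba_mac() reports the
 * maximum number of active commands (MAC) for MCQ, and the MCQ resources are
 * read back from the controller's queue configuration registers rather than
 * from ACPI or device tree.
 */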
static int ufs_qemu_get_hba_mac(struct ufs_hba *hba)
{
	return MAX_SUPP_MAC;
}
static int ufs_qemu_mcq_config_resource(struct ufs_hba *hba)
{
	hba->mcq_base = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba);

	return 0;
}
static int ufs_qemu_op_runtime_config(struct ufs_hba *hba)
{
	struct ufshcd_mcq_opr_info_t *opr;
	int i;

	u32 sqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQDAO, 0));
	u32 sqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQISAO, 0));
	u32 cqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQDAO, 0));
	u32 cqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQISAO, 0));

	hba->mcq_opr[OPR_SQD].offset = sqdao;
	hba->mcq_opr[OPR_SQIS].offset = sqisao;
	hba->mcq_opr[OPR_CQD].offset = cqdao;
	hba->mcq_opr[OPR_CQIS].offset = cqisao;

	for (i = 0; i < OPR_MAX; i++) {
		opr = &hba->mcq_opr[i];
		opr->base = hba->mmio_base + opr->offset;
	}

	return 0;
}
static struct ufs_hba_variant_ops ufs_qemu_hba_vops = {
	.get_hba_mac		= ufs_qemu_get_hba_mac,
	.mcq_config_resource	= ufs_qemu_mcq_config_resource,
	.op_runtime_config	= ufs_qemu_op_runtime_config,
};

static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
	.init			= ufs_intel_common_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
	.init			= ufs_intel_ehl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
};

static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
	.init			= ufs_intel_lkf_init,
	.exit			= ufs_intel_common_exit,
	.hce_enable_notify	= ufs_intel_hce_enable_notify,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.pwr_change_notify	= ufs_intel_lkf_pwr_change_notify,
	.apply_dev_quirks	= ufs_intel_lkf_apply_dev_quirks,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_adl_hba_vops = {
	.init			= ufs_intel_adl_init,
	.exit			= ufs_intel_common_exit,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};

static struct ufs_hba_variant_ops ufs_intel_mtl_hba_vops = {
	.init			= ufs_intel_mtl_init,
	.exit			= ufs_intel_common_exit,
	.hce_enable_notify	= ufs_intel_hce_enable_notify,
	.link_startup_notify	= ufs_intel_link_startup_notify,
	.resume			= ufs_intel_resume,
	.device_reset		= ufs_intel_device_reset,
};
#ifdef CONFIG_PM_SLEEP
static int ufshcd_pci_restore(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	/* Force a full reset and restore */
	ufshcd_set_link_off(hba);

	return ufshcd_system_resume(dev);
}
#endif
/**
 * ufshcd_pci_remove - de-allocate the PCI/SCSI host and its data structure
 *		memory
 * @pdev: pointer to PCI handle
 */
static void ufshcd_pci_remove(struct pci_dev *pdev)
{
	struct ufs_hba *hba = pci_get_drvdata(pdev);

	pm_runtime_forbid(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	ufshcd_remove(hba);
	ufshcd_dealloc_host(hba);
}
/**
 * ufshcd_pci_probe - probe routine of the driver
 * @pdev: pointer to PCI device handle
 * @id: PCI device id
 *
 * Return: 0 on success, non-zero value on failure.
 */
static int
ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ufs_host *ufs_host;
	struct ufs_hba *hba;
	void __iomem *mmio_base;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pcim_enable_device failed\n");
		return err;
	}

	pci_set_master(pdev);

	mmio_base = pcim_iomap_region(pdev, 0, UFSHCD);
	if (IS_ERR(mmio_base)) {
		dev_err(&pdev->dev, "request and iomap failed\n");
		return PTR_ERR(mmio_base);
	}

	err = ufshcd_alloc_host(&pdev->dev, &hba);
	if (err) {
		dev_err(&pdev->dev, "Allocation failed\n");
		return err;
	}

	hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

	err = ufshcd_init(hba, mmio_base, pdev->irq);
	if (err) {
		dev_err(&pdev->dev, "Initialization failed\n");
		ufshcd_dealloc_host(hba);
		return err;
	}

	ufs_host = ufshcd_get_variant(hba);
	if (ufs_host && ufs_host->late_init)
		ufs_host->late_init(hba);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}
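
/*
 * System sleep reuses the generic ufshcd_system_suspend()/resume() helpers
 * for suspend, freeze, thaw and poweroff. Restore (resume from a hibernation
 * image) goes through ufshcd_pci_restore() instead, which forces the link off
 * first so the controller is fully re-initialized.
 */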
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
	.suspend	= ufshcd_system_suspend,
	.resume		= ufshcd_system_resume,
	.freeze		= ufshcd_system_suspend,
	.thaw		= ufshcd_system_resume,
	.poweroff	= ufshcd_system_suspend,
	.restore	= ufshcd_pci_restore,
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
#endif
};
static const struct pci_device_id ufshcd_pci_tbl[] = {
	{ PCI_VENDOR_ID_REDHAT, 0x0013, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	  (kernel_ulong_t)&ufs_qemu_hba_vops },
	{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VDEVICE(INTEL, 0x9DFA), (kernel_ulong_t)&ufs_intel_cnl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B41), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x4B43), (kernel_ulong_t)&ufs_intel_ehl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x98FA), (kernel_ulong_t)&ufs_intel_lkf_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x51FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x54FF), (kernel_ulong_t)&ufs_intel_adl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x7E47), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0xA847), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0x7747), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
	{ PCI_VDEVICE(INTEL, 0xE447), (kernel_ulong_t)&ufs_intel_mtl_hba_vops },
	{ }	/* terminate list */
};

MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
static struct pci_driver ufshcd_pci_driver = {
	.name		= UFSHCD,
	.id_table	= ufshcd_pci_tbl,
	.probe		= ufshcd_pci_probe,
	.remove		= ufshcd_pci_remove,
	.driver		= {
		.pm	= &ufshcd_pci_pm_ops
	},
};

module_pci_driver(ufshcd_pci_driver);
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("UFS host controller PCI glue driver");
MODULE_LICENSE("GPL");