/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/platform_data/clk-lpss.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include "internal.h"

ACPI_MODULE_NAME("acpi_lpss");
#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
#include <asm/pmc_atom.h>

#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

#define LPSS_PRV_REG_COUNT		9

/* LPSS device flags */
#define LPSS_CLK			BIT(0)
#define LPSS_CLK_GATE			BIT(1)
#define LPSS_CLK_DIVIDER		BIT(2)
#define LPSS_LTR			BIT(3)
#define LPSS_SAVE_CTX			BIT(4)
#define LPSS_NO_D3_DELAY		BIT(5)
struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
};

static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};

struct lpss_private_data {
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};
/* LPSS run time quirks */
static unsigned int lpss_quirks;

/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
 *
 * The LPSS DMA controller has neither _PS0 nor _PS3 method. Moreover
 * it can be powered off automatically whenever the last LPSS device goes down.
 * Without power, any access to the DMA controller will hang the system.
 * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
 * well as on the Asus T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA controller powered on whenever we have at least one other device in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)

/* UART Component Parameter Register */
#define LPSS_UART_CPR			0xF4
#define LPSS_UART_CPR_AFCE		BIT(4)
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}

static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}

#define LPSS_I2C_ENABLE			0x6c

static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}
static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};
#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id lpss_cpu_ids[] = {
	ICPU(INTEL_FAM6_ATOM_SILVERMONT1),	/* Valleyview, Bay Trail */
	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */
static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },

	/* Braswell LPSS devices */
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },

	{ }
};

#ifdef CONFIG_X86_INTEL_LPSS
static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;

	return !acpi_dev_resource_memory(res, &r);
}

/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk = ERR_PTR(-ENODEV);
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		parent = devname;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}
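/*
 * Reader-oriented sketch (not part of the original comments): when
 * LPSS_CLK_DIVIDER is set, the per-device clock above is built as
 * <lpt root clk> -> "<dev>" gate -> "<dev>-div" fractional divider ->
 * "<dev>-update" gate.  The (1, 15, 16, 15) arguments to
 * clk_register_fractional_divider() place a 15-bit numerator field at bit 1
 * and a 15-bit denominator field at bit 16 of the device's private clock
 * register; bit 0 is the enable gate and bit 31 appears to act as the
 * update/latch bit for a new m/n setting.
 */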
static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		pdev = acpi_create_platform_device(adev, NULL);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		ret = -ENOMEM;
		goto err_out;
	}

	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	ret = acpi_device_fix_up_power(adev);
	if (ret) {
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (!IS_ERR_OR_NULL(pdev))
		return 1;

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

 err_out:
	kfree(pdata);
	return ret;
}
static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}

static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

 out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}
static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}

static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

static struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr",
};
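/*
 * Illustration (not from the original source): for a device carrying the
 * LPSS_LTR flag, the group above shows up under the platform device in
 * sysfs, e.g. /sys/bus/platform/devices/INT33C2:00/lpss_ltr/{auto_ltr,
 * sw_ltr,ltr_mode}; the exact device name depends on the platform's ACPI
 * tables.  Reading auto_ltr or sw_ltr returns the raw 32-bit LTR register
 * contents in hex, and ltr_mode reports whether software or automatic LTR
 * is currently selected.
 */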
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	u32 ltr_mode, ltr_val;

	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
	if (val < 0) {
		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
		}
		return;
	}

	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}
	ltr_val |= val;
	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}
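/*
 * Worked example (illustrative, assuming the PCIe-style LTR encoding that
 * the macros above suggest, with a 10-bit latency value and a scale selected
 * by LPSS_LTR_SNOOP_LAT_1US/_32US): a request of val = 800 fits within
 * LPSS_LTR_MAX_VAL, so it is written as-is at the 1 us scale with the
 * snoop-requirement bit set.  A request of val = 2000 exceeds
 * LPSS_LTR_MAX_VAL but is below LPSS_LTR_SNOOP_LAT_CUTOFF, so it is shifted
 * right by LPSS_LTR_SNOOP_LAT_SHIFT (2000 >> 5 = 62) and reported at the
 * 32 us scale.  Anything at or above the cutoff is clamped to
 * LPSS_LTR_MAX_VAL at the 32 us scale without the requirement bit.
 */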
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * the prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects a 10 ms delay before the device can be accessed after a D3
	 * to D0 transition. However, some platforms such as BSW do not need
	 * this delay.
	 */
	unsigned int delay = 10;	/* default 10 ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}
static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_runtime_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where a device is either
	 * in a known state defined by the BIOS or, most likely, powered off.
	 * Because of this we have to deassert the reset line to be sure that
	 * ->probe() will recognize the device.
	 */
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		lpss_deassert_reset(pdata);

	return 0;
}

static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_runtime_suspend(dev);
}

#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_suspend_late(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	return acpi_dev_suspend_late(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume_early(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_resume_early(dev);
}
#endif /* CONFIG_PM_SLEEP */
/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)

static DEFINE_MUTEX(lpss_iosf_mutex);
static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual status of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. the LPSS devices, excluding both LPSS DMA controllers, along
	 * with the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status, pmc_mask = 0xfe000ffe;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all other
	 * devices are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);
exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	mutex_unlock(&lpss_iosf_mutex);
}
static int acpi_lpss_runtime_suspend(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_runtime_suspend(dev);
	if (ret)
		return ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_runtime_suspend(dev);

	/*
	 * This call must be last in the sequence, otherwise the PMC will
	 * return a wrong status for devices that are about to be powered off.
	 * See lpss_iosf_enter_d3_state() for further information.
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to be in symmetry with
	 * acpi_lpss_runtime_suspend().
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_runtime_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */
static struct dev_pm_domain acpi_lpss_pm_domain = {
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = pm_complete_with_resume_check,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_suspend,
		.poweroff_late = acpi_lpss_suspend_late,
		.restore_early = acpi_lpss_resume_early,
#endif /* CONFIG_PM_SLEEP */
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif /* CONFIG_PM */
	},
};
static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}

static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};
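/*
 * Reader's note (not part of the original comments): the bus notifier above
 * is what makes the callbacks in acpi_lpss_pm_domain take effect.  When a
 * platform device matching acpi_lpss_device_ids is added or bound to a
 * driver, its PM domain pointer is set to acpi_lpss_pm_domain, so the
 * LPSS-specific runtime and system sleep handlers are used instead of the
 * generic ones; the domain is detached again when the device is unbound or
 * removed.
 */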
static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}

static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};
void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpt_clk_init();
	if (ret)
		return;

	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}

#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */