// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI support for Intel Lynxpoint LPSS.
 *
 * Copyright (C) 2013, Intel Corporation
 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
 *          Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */
#include <linux/acpi.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/x86/clk-lpss.h>
#include <linux/platform_data/x86/pmc_atom.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
#include <linux/suspend.h>
#include <linux/delay.h>

#include "internal.h"
ACPI_MODULE_NAME("acpi_lpss");

#ifdef CONFIG_X86_INTEL_LPSS

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/iosf_mbi.h>
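/*
 * Note: LPSS_ADDR() below stores a pointer to the matching lpss_device_desc
 * in the driver_data field of the ACPI ID table entry (or 0UL when LPSS
 * support is not compiled in).
 */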
#define LPSS_ADDR(desc) ((unsigned long)&desc)

#define LPSS_CLK_SIZE	0x04
#define LPSS_LTR_SIZE	0x18

/* Offsets relative to LPSS_PRIVATE_OFFSET */
#define LPSS_CLK_DIVIDER_DEF_MASK	(BIT(1) | BIT(16))
#define LPSS_RESETS			0x04
#define LPSS_RESETS_RESET_FUNC		BIT(0)
#define LPSS_RESETS_RESET_APB		BIT(1)
#define LPSS_GENERAL			0x08
#define LPSS_GENERAL_LTR_MODE_SW	BIT(2)
#define LPSS_GENERAL_UART_RTS_OVRD	BIT(3)
#define LPSS_SW_LTR			0x10
#define LPSS_AUTO_LTR			0x14
#define LPSS_LTR_SNOOP_REQ		BIT(15)
#define LPSS_LTR_SNOOP_MASK		0x0000FFFF
#define LPSS_LTR_SNOOP_LAT_1US		0x800
#define LPSS_LTR_SNOOP_LAT_32US		0xC00
#define LPSS_LTR_SNOOP_LAT_SHIFT	5
#define LPSS_LTR_SNOOP_LAT_CUTOFF	3000
#define LPSS_LTR_MAX_VAL		0x3FF
#define LPSS_TX_INT			0x20
#define LPSS_TX_INT_MASK		BIT(1)

#define LPSS_PRV_REG_COUNT		9

#define LPSS_CLK			BIT(0)
#define LPSS_CLK_GATE			BIT(1)
#define LPSS_CLK_DIVIDER		BIT(2)
#define LPSS_LTR			BIT(3)
#define LPSS_SAVE_CTX			BIT(4)
#define LPSS_NO_D3_DELAY		BIT(5)
/* Crystal Cove PMIC shares same ACPI ID between different platforms */
#define BYT_CRC_HRV		2
#define CHT_CRC_HRV		3
struct lpss_private_data;

struct lpss_device_desc {
	unsigned int flags;
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
	bool resume_from_noirq;
};
static const struct lpss_device_desc lpss_dma_desc = {
	.flags = LPSS_CLK,
};
struct lpss_private_data {
	struct acpi_device *adev;
	void __iomem *mmio_base;
	resource_size_t mmio_size;
	unsigned int fixed_clk_rate;
	struct clk *clk;
	const struct lpss_device_desc *dev_desc;
	u32 prv_reg_ctx[LPSS_PRV_REG_COUNT];
};
/* Devices which need to be in D3 before lpss_iosf_enter_d3_state() proceeds */
static u32 pmc_atom_d3_mask = 0xfe000ffe;

/* LPSS run time quirks */
static unsigned int lpss_quirks;
/*
 * LPSS_QUIRK_ALWAYS_POWER_ON: override power state for LPSS DMA device.
 *
 * The LPSS DMA controller has neither a _PS0 nor a _PS3 method. Moreover,
 * it can be powered off automatically whenever the last LPSS device goes down.
 * While it is powered off, any access to the DMA controller hangs the system.
 * The behaviour is reproduced on some HP laptops based on Intel BayTrail as
 * well as on the ASUS T100TA transformer.
 *
 * This quirk overrides the power state of the entire LPSS island to keep the
 * DMA powered on whenever we have at least one other device in use.
 */
#define LPSS_QUIRK_ALWAYS_POWER_ON	BIT(0)
/* UART Component Parameter Register */
#define LPSS_UART_CPR		0xF4
#define LPSS_UART_CPR_AFCE	BIT(4)
static void lpss_uart_setup(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_TX_INT;
	val = readl(pdata->mmio_base + offset);
	writel(val | LPSS_TX_INT_MASK, pdata->mmio_base + offset);

	val = readl(pdata->mmio_base + LPSS_UART_CPR);
	if (!(val & LPSS_UART_CPR_AFCE)) {
		offset = pdata->dev_desc->prv_offset + LPSS_GENERAL;
		val = readl(pdata->mmio_base + offset);
		val |= LPSS_GENERAL_UART_RTS_OVRD;
		writel(val, pdata->mmio_base + offset);
	}
}
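/*
 * Deassert both the functional and the APB bridge resets by setting the
 * corresponding bits in the LPSS_RESETS private register, so the host
 * controller becomes accessible. Used as a ->setup() hook for some devices
 * and called directly from the I2C setup and probe/activate paths.
 */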
static void lpss_deassert_reset(struct lpss_private_data *pdata)
{
	unsigned int offset;
	u32 val;

	offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
	val = readl(pdata->mmio_base + offset);
	val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
	writel(val, pdata->mmio_base + offset);
}
/*
 * BYT PWM used for backlight control by the i915 driver on systems without
 * the Crystal Cove PMIC.
 */
static struct pwm_lookup byt_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80860F09:00", 0, "0000:00:02.0",
			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void byt_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV))
		pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
}
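/*
 * LPSS_I2C_ENABLE is the DesignWare I2C controller's IC_ENABLE register
 * offset; byt_i2c_setup() writes 0 there so the controller is handed to the
 * i2c-designware driver in a disabled state.
 */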
#define LPSS_I2C_ENABLE			0x6c

static void byt_i2c_setup(struct lpss_private_data *pdata)
{
	const char *uid_str = acpi_device_uid(pdata->adev);
	acpi_handle handle = pdata->adev->handle;
	unsigned long long shared_host = 0;
	acpi_status status;
	long uid = 0;

	/* Expected to always be true, but better safe than sorry */
	if (uid_str)
		uid = simple_strtol(uid_str, NULL, 10);

	/* Detect I2C bus shared with PUNIT and ignore its d3 status */
	status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
	if (ACPI_SUCCESS(status) && shared_host && uid)
		pmc_atom_d3_mask &= ~(BIT_LPSS2_F1_I2C1 << (uid - 1));

	lpss_deassert_reset(pdata);

	if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
		pdata->fixed_clk_rate = 133000000;

	writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
}
/* BSW PWM used for backlight control by the i915 driver */
static struct pwm_lookup bsw_pwm_lookup[] = {
	PWM_LOOKUP_WITH_MODULE("80862288:00", 0, "0000:00:02.0",
			       "pwm_backlight", 0, PWM_POLARITY_NORMAL,
			       "pwm-lpss-platform"),
};

static void bsw_pwm_setup(struct lpss_private_data *pdata)
{
	struct acpi_device *adev = pdata->adev;

	/* Only call pwm_add_table for the first PWM controller */
	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
		return;

	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
}
static const struct lpss_device_desc lpt_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR
			| LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {
	.flags = LPSS_LTR,
	.prv_offset = 0x1000,
	.prv_size_override = 0x1018,
};

static const struct lpss_device_desc byt_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_pwm_setup,
};

static const struct lpss_device_desc bsw_pwm_dev_desc = {
	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = bsw_pwm_setup,
};

static const struct lpss_device_desc byt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
	.prv_offset = 0x400,
};

static const struct lpss_device_desc byt_sdio_dev_desc = {
	.flags = LPSS_CLK,
};

static const struct lpss_device_desc byt_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_i2c_dev_desc = {
	.flags = LPSS_CLK | LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
	.prv_offset = 0x800,
	.setup = byt_i2c_setup,
	.resume_from_noirq = true,
};

static const struct lpss_device_desc bsw_spi_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX
			| LPSS_NO_D3_DELAY,
	.prv_offset = 0x400,
	.setup = lpss_deassert_reset,
};
#define ICPU(model)	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }

static const struct x86_cpu_id lpss_cpu_ids[] = {
	ICPU(INTEL_FAM6_ATOM_SILVERMONT),	/* Valleyview, Bay Trail */
	ICPU(INTEL_FAM6_ATOM_AIRMONT),		/* Braswell, Cherry Trail */
	{}
};

#else

#define LPSS_ADDR(desc) (0UL)

#endif /* CONFIG_X86_INTEL_LPSS */
static const struct acpi_device_id acpi_lpss_device_ids[] = {
	/* Generic LPSS devices */
	{ "INTL9C60", LPSS_ADDR(lpss_dma_desc) },

	/* Lynxpoint LPSS devices */
	{ "INT33C0", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C1", LPSS_ADDR(lpt_dev_desc) },
	{ "INT33C2", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C3", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT33C4", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C5", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT33C6", LPSS_ADDR(lpt_sdio_dev_desc) },

	/* BayTrail LPSS devices */
	{ "80860F09", LPSS_ADDR(byt_pwm_dev_desc) },
	{ "80860F0A", LPSS_ADDR(byt_uart_dev_desc) },
	{ "80860F0E", LPSS_ADDR(byt_spi_dev_desc) },
	{ "80860F14", LPSS_ADDR(byt_sdio_dev_desc) },
	{ "80860F41", LPSS_ADDR(byt_i2c_dev_desc) },

	/* Braswell LPSS devices */
	{ "80862286", LPSS_ADDR(lpss_dma_desc) },
	{ "80862288", LPSS_ADDR(bsw_pwm_dev_desc) },
	{ "8086228A", LPSS_ADDR(bsw_uart_dev_desc) },
	{ "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
	{ "808622C0", LPSS_ADDR(lpss_dma_desc) },
	{ "808622C1", LPSS_ADDR(bsw_i2c_dev_desc) },

	/* Broadwell LPSS devices */
	{ "INT3430", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3431", LPSS_ADDR(lpt_dev_desc) },
	{ "INT3432", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3433", LPSS_ADDR(lpt_i2c_dev_desc) },
	{ "INT3434", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3435", LPSS_ADDR(lpt_uart_dev_desc) },
	{ "INT3436", LPSS_ADDR(lpt_sdio_dev_desc) },

	/* Wildcat Point LPSS devices */
	{ "INT3438", LPSS_ADDR(lpt_dev_desc) },

	{ }
};
#ifdef CONFIG_X86_INTEL_LPSS
static int is_memory(struct acpi_resource *res, void *not_used)
{
	struct resource r;

	return !acpi_dev_resource_memory(res, &r);
}
/* LPSS main clock device. */
static struct platform_device *lpss_clk_dev;

static inline void lpt_register_clock_device(void)
{
	lpss_clk_dev = platform_device_register_simple("clk-lpt", -1, NULL, 0);
}
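/*
 * Build the per-device clock (an optional gate and/or fractional divider on
 * top of the root LPSS clock exported by the "clk-lpt" platform device) and
 * register it with clkdev under dev_desc->clk_con_id so the host controller
 * driver can pick it up.
 */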
static int register_device_clock(struct acpi_device *adev,
				 struct lpss_private_data *pdata)
{
	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
	const char *devname = dev_name(&adev->dev);
	struct clk *clk;
	struct lpss_clk_data *clk_data;
	const char *parent, *clk_name;
	void __iomem *prv_base;

	if (!lpss_clk_dev)
		lpt_register_clock_device();

	clk_data = platform_get_drvdata(lpss_clk_dev);
	if (!clk_data)
		return -ENODEV;
	clk = clk_data->clk;

	if (!pdata->mmio_base
	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
		return -ENODATA;

	parent = clk_data->name;
	prv_base = pdata->mmio_base + dev_desc->prv_offset;

	if (pdata->fixed_clk_rate) {
		clk = clk_register_fixed_rate(NULL, devname, parent, 0,
					      pdata->fixed_clk_rate);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_GATE) {
		clk = clk_register_gate(NULL, devname, parent, 0,
					prv_base, 0, 0, NULL);
		goto out;
	}

	if (dev_desc->flags & LPSS_CLK_DIVIDER) {
		/* Prevent division by zero */
		if (!readl(prv_base))
			writel(LPSS_CLK_DIVIDER_DEF_MASK, prv_base);

		clk_name = kasprintf(GFP_KERNEL, "%s-div", devname);
		if (!clk_name)
			return -ENOMEM;
		clk = clk_register_fractional_divider(NULL, clk_name, parent,
						      0, prv_base,
						      1, 15, 16, 15, 0, NULL);
		parent = clk_name;

		clk_name = kasprintf(GFP_KERNEL, "%s-update", devname);
		if (!clk_name) {
			kfree(parent);
			return -ENOMEM;
		}
		clk = clk_register_gate(NULL, clk_name, parent,
					CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
					prv_base, 31, 0, NULL);
		kfree(parent);
		kfree(clk_name);
	}
out:
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	pdata->clk = clk;
	clk_register_clkdev(clk, dev_desc->clk_con_id, devname);
	return 0;
}
struct lpss_device_links {
	const char *supplier_hid;
	const char *supplier_uid;
	const char *consumer_hid;
	const char *consumer_uid;
	u32 flags;
	const struct dmi_system_id *dep_missing_ids;
};

/* Please keep this list sorted alphabetically by vendor and model */
static const struct dmi_system_id i2c1_dep_missing_dmi_ids[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "T200TA"),
		},
	},
	{}
};
/*
 * The _DEP method is used to identify dependencies but instead of creating
 * device links for every handle in _DEP, only links in the following list are
 * created. That is necessary because, in the general case, _DEP can refer to
 * devices that might not have drivers, or that are on different buses, or
 * where the supplier is not enumerated until after the consumer is probed.
 */
static const struct lpss_device_links lpss_device_links[] = {
	/* CHT External sdcard slot controller depends on PMIC I2C ctrl */
	{"808622C1", "7", "80860F14", "3", DL_FLAG_PM_RUNTIME},
	/* CHT iGPU depends on PMIC I2C controller */
	{"808622C1", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on the Embedded Controller I2C controller (UID 1) */
	{"80860F41", "1", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME,
	 i2c1_dep_missing_dmi_ids},
	/* BYT CR iGPU depends on PMIC I2C controller (UID 5 on CR) */
	{"80860F41", "5", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
	/* BYT iGPU depends on PMIC I2C controller (UID 7 on non CR) */
	{"80860F41", "7", "LNXVIDEO", NULL, DL_FLAG_PM_RUNTIME},
};
static bool hid_uid_match(struct acpi_device *adev,
			  const char *hid2, const char *uid2)
{
	const char *hid1 = acpi_device_hid(adev);
	const char *uid1 = acpi_device_uid(adev);

	if (strcmp(hid1, hid2))
		return false;

	if (!uid2)
		return true;

	return uid1 && !strcmp(uid1, uid2);
}
static bool acpi_lpss_is_supplier(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return hid_uid_match(adev, link->supplier_hid, link->supplier_uid);
}

static bool acpi_lpss_is_consumer(struct acpi_device *adev,
				  const struct lpss_device_links *link)
{
	return hid_uid_match(adev, link->consumer_hid, link->consumer_uid);
}
struct hid_uid {
	const char *hid;
	const char *uid;
};

static int match_hid_uid(struct device *dev, const void *data)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const struct hid_uid *id = data;

	if (!adev)
		return 0;

	return hid_uid_match(adev, id->hid, id->uid);
}
static struct device *acpi_lpss_find_device(const char *hid, const char *uid)
{
	struct device *dev;

	struct hid_uid data = {
		.hid = hid,
		.uid = uid,
	};

	dev = bus_find_device(&platform_bus_type, NULL, &data, match_hid_uid);
	if (dev)
		return dev;

	return bus_find_device(&pci_bus_type, NULL, &data, match_hid_uid);
}
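/*
 * Return true if @handle appears in the _DEP (operation region dependencies)
 * list of @adev, i.e. the firmware declares @adev to depend on that device.
 */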
static bool acpi_lpss_dep(struct acpi_device *adev, acpi_handle handle)
{
	struct acpi_handle_list dep_devices;
	acpi_status status;
	int i;

	if (!acpi_has_method(adev->handle, "_DEP"))
		return false;

	status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
					 &dep_devices);
	if (ACPI_FAILURE(status)) {
		dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
		return false;
	}

	for (i = 0; i < dep_devices.count; i++) {
		if (dep_devices.handles[i] == handle)
			return true;
	}

	return false;
}
static void acpi_lpss_link_consumer(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->consumer_hid, link->consumer_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_lpss_dep(ACPI_COMPANION(dev2), ACPI_HANDLE(dev1)))
		device_link_add(dev2, dev1, link->flags);

	put_device(dev2);
}

static void acpi_lpss_link_supplier(struct device *dev1,
				    const struct lpss_device_links *link)
{
	struct device *dev2;

	dev2 = acpi_lpss_find_device(link->supplier_hid, link->supplier_uid);
	if (!dev2)
		return;

	if ((link->dep_missing_ids && dmi_check_system(link->dep_missing_ids))
	    || acpi_lpss_dep(ACPI_COMPANION(dev1), ACPI_HANDLE(dev2)))
		device_link_add(dev1, dev2, link->flags);

	put_device(dev2);
}

static void acpi_lpss_create_device_links(struct acpi_device *adev,
					  struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lpss_device_links); i++) {
		const struct lpss_device_links *link = &lpss_device_links[i];

		if (acpi_lpss_is_supplier(adev, link))
			acpi_lpss_link_consumer(&pdev->dev, link);

		if (acpi_lpss_is_consumer(adev, link))
			acpi_lpss_link_supplier(&pdev->dev, link);
	}
}
static int acpi_lpss_create_device(struct acpi_device *adev,
				   const struct acpi_device_id *id)
{
	const struct lpss_device_desc *dev_desc;
	struct lpss_private_data *pdata;
	struct resource_entry *rentry;
	struct list_head resource_list;
	struct platform_device *pdev;
	int ret;

	dev_desc = (const struct lpss_device_desc *)id->driver_data;
	if (!dev_desc) {
		pdev = acpi_create_platform_device(adev, NULL);
		return IS_ERR_OR_NULL(pdev) ? PTR_ERR(pdev) : 1;
	}
	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	INIT_LIST_HEAD(&resource_list);
	ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL);
	if (ret < 0)
		goto err_out;

	list_for_each_entry(rentry, &resource_list, node)
		if (resource_type(rentry->res) == IORESOURCE_MEM) {
			if (dev_desc->prv_size_override)
				pdata->mmio_size = dev_desc->prv_size_override;
			else
				pdata->mmio_size = resource_size(rentry->res);
			pdata->mmio_base = ioremap(rentry->res->start,
						   pdata->mmio_size);
			break;
		}

	acpi_dev_free_resource_list(&resource_list);

	if (!pdata->mmio_base) {
		/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
		adev->pnp.type.platform_id = 0;
		/* Skip the device, but continue the namespace scan. */
		ret = 0;
		goto err_out;
	}

	pdata->adev = adev;
	pdata->dev_desc = dev_desc;

	if (dev_desc->setup)
		dev_desc->setup(pdata);

	if (dev_desc->flags & LPSS_CLK) {
		ret = register_device_clock(adev, pdata);
		if (ret) {
			/* Skip the device, but continue the namespace scan. */
			ret = 0;
			goto err_out;
		}
	}

	/*
	 * This works around a known issue in ACPI tables where LPSS devices
	 * have _PS0 and _PS3 without _PSC (and no power resources), so
	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
	 */
	acpi_device_fix_up_power(adev);

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev, dev_desc->properties);
	if (!IS_ERR_OR_NULL(pdev)) {
		acpi_lpss_create_device_links(adev, pdev);
		return 1;
	}

	ret = PTR_ERR(pdev);
	adev->driver_data = NULL;

 err_out:
	kfree(pdata);
	return ret;
}
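/* Accessors for the device's private register space at dev_desc->prv_offset */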
static u32 __lpss_reg_read(struct lpss_private_data *pdata, unsigned int reg)
{
	return readl(pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}
static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
			     unsigned int reg)
{
	writel(val, pdata->mmio_base + pdata->dev_desc->prv_offset + reg);
}
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
	struct acpi_device *adev;
	struct lpss_private_data *pdata;
	unsigned long flags;
	int ret;

	ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
	if (WARN_ON(ret))
		return ret;

	spin_lock_irqsave(&dev->power.lock, flags);
	if (pm_runtime_suspended(dev)) {
		ret = -EAGAIN;
		goto out;
	}
	pdata = acpi_driver_data(adev);
	if (WARN_ON(!pdata || !pdata->mmio_base)) {
		ret = -ENODEV;
		goto out;
	}
	*val = __lpss_reg_read(pdata, reg);

out:
	spin_unlock_irqrestore(&dev->power.lock, flags);
	return ret;
}

static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	u32 ltr_value = 0;
	unsigned int reg;
	int ret;

	reg = strcmp(attr->attr.name, "auto_ltr") ? LPSS_SW_LTR : LPSS_AUTO_LTR;
	ret = lpss_reg_read(dev, reg, &ltr_value);
	if (ret)
		return ret;

	return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
}

static ssize_t lpss_ltr_mode_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	u32 ltr_mode = 0;
	char *outstr;
	int ret;

	ret = lpss_reg_read(dev, LPSS_GENERAL, &ltr_mode);
	if (ret)
		return ret;

	outstr = (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) ? "sw" : "auto";
	return sprintf(buf, "%s\n", outstr);
}
static DEVICE_ATTR(auto_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(sw_ltr, S_IRUSR, lpss_ltr_show, NULL);
static DEVICE_ATTR(ltr_mode, S_IRUSR, lpss_ltr_mode_show, NULL);

static struct attribute *lpss_attrs[] = {
	&dev_attr_auto_ltr.attr,
	&dev_attr_sw_ltr.attr,
	&dev_attr_ltr_mode.attr,
	NULL,
};

static const struct attribute_group lpss_attr_group = {
	.attrs = lpss_attrs,
	.name = "lpss_ltr",
};
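/*
 * Software LTR programming: latencies up to LPSS_LTR_MAX_VAL are encoded on
 * the 1 us scale, larger values are shifted down and encoded on the 32 us
 * scale, and a negative value hands LTR control back to the hardware
 * (automatic mode).
 */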
static void acpi_lpss_set_ltr(struct device *dev, s32 val)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	u32 ltr_mode, ltr_val;

	ltr_mode = __lpss_reg_read(pdata, LPSS_GENERAL);
	if (val < 0) {
		if (ltr_mode & LPSS_GENERAL_LTR_MODE_SW) {
			ltr_mode &= ~LPSS_GENERAL_LTR_MODE_SW;
			__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
		}
		return;
	}
	ltr_val = __lpss_reg_read(pdata, LPSS_SW_LTR) & ~LPSS_LTR_SNOOP_MASK;
	if (val >= LPSS_LTR_SNOOP_LAT_CUTOFF) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US;
		val = LPSS_LTR_MAX_VAL;
	} else if (val > LPSS_LTR_MAX_VAL) {
		ltr_val |= LPSS_LTR_SNOOP_LAT_32US | LPSS_LTR_SNOOP_REQ;
		val >>= LPSS_LTR_SNOOP_LAT_SHIFT;
	} else {
		ltr_val |= LPSS_LTR_SNOOP_LAT_1US | LPSS_LTR_SNOOP_REQ;
	}
	ltr_val |= val;
	__lpss_reg_write(ltr_val, pdata, LPSS_SW_LTR);
	if (!(ltr_mode & LPSS_GENERAL_LTR_MODE_SW)) {
		ltr_mode |= LPSS_GENERAL_LTR_MODE_SW;
		__lpss_reg_write(ltr_mode, pdata, LPSS_GENERAL);
	}
}
#ifdef CONFIG_PM
/**
 * acpi_lpss_save_ctx() - Save the private registers of LPSS device
 * @dev: ACPI LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Most LPSS devices have private registers which may lose their context when
 * the device is powered down. acpi_lpss_save_ctx() saves those registers into
 * prv_reg_ctx array.
 */
static void acpi_lpss_save_ctx(struct device *dev,
			       struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		pdata->prv_reg_ctx[i] = __lpss_reg_read(pdata, offset);
		dev_dbg(dev, "saving 0x%08x from LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}

/**
 * acpi_lpss_restore_ctx() - Restore the private registers of LPSS device
 * @dev: ACPI LPSS device
 * @pdata: pointer to the private data of the LPSS device
 *
 * Restores the registers that were previously stored with acpi_lpss_save_ctx().
 */
static void acpi_lpss_restore_ctx(struct device *dev,
				  struct lpss_private_data *pdata)
{
	unsigned int i;

	for (i = 0; i < LPSS_PRV_REG_COUNT; i++) {
		unsigned long offset = i * sizeof(u32);

		__lpss_reg_write(pdata->prv_reg_ctx[i], pdata, offset);
		dev_dbg(dev, "restoring 0x%08x to LPSS reg at offset 0x%02lx\n",
			pdata->prv_reg_ctx[i], offset);
	}
}
static void acpi_lpss_d3_to_d0_delay(struct lpss_private_data *pdata)
{
	/*
	 * The following delay is needed or the subsequent write operations may
	 * fail. The LPSS devices are actually PCI devices and the PCI spec
	 * expects a 10ms delay before the device can be accessed after a D3 to
	 * D0 transition. However, some platforms, such as BSW, do not need
	 * this delay.
	 */
	unsigned int delay = 10;	/* default 10ms delay */

	if (pdata->dev_desc->flags & LPSS_NO_D3_DELAY)
		delay = 0;

	msleep(delay);
}

static int acpi_lpss_activate(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	/*
	 * This is called only at the ->probe() stage, where a device is either
	 * in a known state defined by the BIOS or, most likely, powered off.
	 * Because of this, we have to deassert the reset line to be sure that
	 * ->probe() will recognize the device.
	 */
	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		lpss_deassert_reset(pdata);

	return 0;
}
static void acpi_lpss_dismiss(struct device *dev)
{
	acpi_dev_suspend(dev, false);
}

/* IOSF SB for LPSS island */
#define LPSS_IOSF_UNIT_LPIOEP		0xA0
#define LPSS_IOSF_UNIT_LPIO1		0xAB
#define LPSS_IOSF_UNIT_LPIO2		0xAC

#define LPSS_IOSF_PMCSR			0x84
#define LPSS_PMCSR_D0			0
#define LPSS_PMCSR_D3hot		3
#define LPSS_PMCSR_Dx_MASK		GENMASK(1, 0)

#define LPSS_IOSF_GPIODEF0		0x154
#define LPSS_GPIODEF0_DMA1_D3		BIT(2)
#define LPSS_GPIODEF0_DMA2_D3		BIT(3)
#define LPSS_GPIODEF0_DMA_D3_MASK	GENMASK(3, 2)
#define LPSS_GPIODEF0_DMA_LLP		BIT(13)
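/*
 * The registers above are reached over the IOSF sideband (MBI): PMCSR in the
 * LPIO1/LPIO2 units moves the two LPSS DMA controllers between D0 and D3hot,
 * while GPIODEF0 in the endpoint unit carries the matching DMA D3/LLP bits.
 */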
static DEFINE_MUTEX(lpss_iosf_mutex);
static bool lpss_iosf_d3_entered = true;

static void lpss_iosf_enter_d3_state(void)
{
	u32 value1 = 0;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D3hot;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;
	/*
	 * The PMC provides information about the actual status of the LPSS
	 * devices. Here we read the values related to the LPSS power island,
	 * i.e. LPSS devices, excluding both LPSS DMA controllers, along with
	 * the SCC domain.
	 */
	u32 func_dis, d3_sts_0, pmc_status;
	int ret;

	ret = pmc_atom_read(PMC_FUNC_DIS, &func_dis);
	if (ret)
		return;

	mutex_lock(&lpss_iosf_mutex);

	ret = pmc_atom_read(PMC_D3_STS_0, &d3_sts_0);
	if (ret)
		goto exit;

	/*
	 * Get the status of the entire LPSS power island on a per-device
	 * basis. Shut down both LPSS DMA controllers if and only if all other
	 * devices are already in D3hot.
	 */
	pmc_status = (~(d3_sts_0 | func_dis)) & pmc_atom_d3_mask;
	if (pmc_status)
		goto exit;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	lpss_iosf_d3_entered = true;

exit:
	mutex_unlock(&lpss_iosf_mutex);
}

static void lpss_iosf_exit_d3_state(void)
{
	u32 value1 = LPSS_GPIODEF0_DMA1_D3 | LPSS_GPIODEF0_DMA2_D3 |
		     LPSS_GPIODEF0_DMA_LLP;
	u32 mask1 = LPSS_GPIODEF0_DMA_D3_MASK | LPSS_GPIODEF0_DMA_LLP;
	u32 value2 = LPSS_PMCSR_D0;
	u32 mask2 = LPSS_PMCSR_Dx_MASK;

	mutex_lock(&lpss_iosf_mutex);

	if (!lpss_iosf_d3_entered)
		goto exit;

	lpss_iosf_d3_entered = false;

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIOEP, MBI_CR_WRITE,
			LPSS_IOSF_GPIODEF0, value1, mask1);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO2, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

	iosf_mbi_modify(LPSS_IOSF_UNIT_LPIO1, MBI_CFG_WRITE,
			LPSS_IOSF_PMCSR, value2, mask2);

exit:
	mutex_unlock(&lpss_iosf_mutex);
}
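/*
 * acpi_lpss_suspend()/acpi_lpss_resume() below are shared by the runtime PM
 * and system sleep callbacks; they save/restore the private register context
 * and, on BYT/CHT, drive the LPSS island D3 quirk via the IOSF helpers above.
 */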
static int acpi_lpss_suspend(struct device *dev, bool wakeup)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_save_ctx(dev, pdata);

	ret = acpi_dev_suspend(dev, wakeup);

	/*
	 * This call must be last in the sequence, otherwise the PMC will
	 * return the wrong status for devices that are about to be powered
	 * off. See lpss_iosf_enter_d3_state() for further information.
	 */
	if (acpi_target_system_state() == ACPI_STATE_S0 &&
	    lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_enter_d3_state();

	return ret;
}

static int acpi_lpss_resume(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/*
	 * This call is kept first to be in symmetry with
	 * acpi_lpss_runtime_suspend().
	 */
	if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
		lpss_iosf_exit_d3_state();

	ret = acpi_dev_resume(dev);
	if (ret)
		return ret;

	acpi_lpss_d3_to_d0_delay(pdata);

	if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
		acpi_lpss_restore_ctx(dev, pdata);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
static int acpi_lpss_do_suspend_late(struct device *dev)
{
	int ret;

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	ret = pm_generic_suspend_late(dev);
	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_suspend_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_suspend_late(dev);
}

static int acpi_lpss_suspend_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	if (pdata->dev_desc->resume_from_noirq) {
		/*
		 * The driver's ->suspend_late callback will be invoked by
		 * acpi_lpss_do_suspend_late(), with the assumption that the
		 * driver really wanted to run that code in ->suspend_noirq, but
		 * it could not run after acpi_dev_suspend() and the driver
		 * expected the latter to be called in the "late" phase.
		 */
		ret = acpi_lpss_do_suspend_late(dev);
		if (ret)
			return ret;
	}

	return acpi_subsys_suspend_noirq(dev);
}
static int acpi_lpss_do_resume_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_resume_early(dev);
}

static int acpi_lpss_resume_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_resume_early(dev);
}
static int acpi_lpss_resume_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	/* Follow acpi_subsys_resume_noirq(). */
	if (dev_pm_may_skip_resume(dev))
		return 0;

	if (dev_pm_smart_suspend_and_suspended(dev))
		pm_runtime_set_active(dev);

	ret = pm_generic_resume_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/*
	 * The driver's ->resume_early callback will be invoked by
	 * acpi_lpss_do_resume_early(), with the assumption that the driver
	 * really wanted to run that code in ->resume_noirq, but it could not
	 * run before acpi_dev_resume() and the driver expected the latter to
	 * be called in the "early" phase.
	 */
	return acpi_lpss_do_resume_early(dev);
}
static int acpi_lpss_do_restore_early(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_restore_early(dev);
}

static int acpi_lpss_restore_early(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_restore_early(dev);
}
static int acpi_lpss_restore_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
	int ret;

	ret = pm_generic_restore_noirq(dev);
	if (ret)
		return ret;

	if (!pdata->dev_desc->resume_from_noirq)
		return 0;

	/* This is analogous to what happens in acpi_lpss_resume_noirq(). */
	return acpi_lpss_do_restore_early(dev);
}
static int acpi_lpss_do_poweroff_late(struct device *dev)
{
	int ret = pm_generic_poweroff_late(dev);

	return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
}

static int acpi_lpss_poweroff_late(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq)
		return 0;

	return acpi_lpss_do_poweroff_late(dev);
}
static int acpi_lpss_poweroff_noirq(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (dev_pm_smart_suspend_and_suspended(dev))
		return 0;

	if (pdata->dev_desc->resume_from_noirq) {
		/* This is analogous to the acpi_lpss_suspend_noirq() case. */
		int ret = acpi_lpss_do_poweroff_late(dev);

		if (ret)
			return ret;
	}

	return pm_generic_poweroff_noirq(dev);
}
#endif /* CONFIG_PM_SLEEP */
static int acpi_lpss_runtime_suspend(struct device *dev)
{
	int ret = pm_generic_runtime_suspend(dev);

	return ret ? ret : acpi_lpss_suspend(dev, true);
}

static int acpi_lpss_runtime_resume(struct device *dev)
{
	int ret = acpi_lpss_resume(dev);

	return ret ? ret : pm_generic_runtime_resume(dev);
}
#endif /* CONFIG_PM */
static struct dev_pm_domain acpi_lpss_pm_domain = {
#ifdef CONFIG_PM
	.activate = acpi_lpss_activate,
	.dismiss = acpi_lpss_dismiss,
#endif
	.ops = {
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
		.prepare = acpi_subsys_prepare,
		.complete = acpi_subsys_complete,
		.suspend = acpi_subsys_suspend,
		.suspend_late = acpi_lpss_suspend_late,
		.suspend_noirq = acpi_lpss_suspend_noirq,
		.resume_noirq = acpi_lpss_resume_noirq,
		.resume_early = acpi_lpss_resume_early,
		.freeze = acpi_subsys_freeze,
		.poweroff = acpi_subsys_poweroff,
		.poweroff_late = acpi_lpss_poweroff_late,
		.poweroff_noirq = acpi_lpss_poweroff_noirq,
		.restore_noirq = acpi_lpss_restore_noirq,
		.restore_early = acpi_lpss_restore_early,
#endif
		.runtime_suspend = acpi_lpss_runtime_suspend,
		.runtime_resume = acpi_lpss_runtime_resume,
#endif
	},
};
static int acpi_lpss_platform_notify(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct platform_device *pdev = to_platform_device(data);
	struct lpss_private_data *pdata;
	struct acpi_device *adev;
	const struct acpi_device_id *id;

	id = acpi_match_device(acpi_lpss_device_ids, &pdev->dev);
	if (!id || !id->driver_data)
		return 0;

	if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
		return 0;

	pdata = acpi_driver_data(adev);
	if (!pdata)
		return 0;

	if (pdata->mmio_base &&
	    pdata->mmio_size < pdata->dev_desc->prv_offset + LPSS_LTR_SIZE) {
		dev_err(&pdev->dev, "MMIO size insufficient to access LTR\n");
		return 0;
	}

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		break;
	case BUS_NOTIFY_DRIVER_NOT_BOUND:
	case BUS_NOTIFY_UNBOUND_DRIVER:
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	case BUS_NOTIFY_ADD_DEVICE:
		dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
		if (pdata->dev_desc->flags & LPSS_LTR)
			return sysfs_create_group(&pdev->dev.kobj,
						  &lpss_attr_group);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (pdata->dev_desc->flags & LPSS_LTR)
			sysfs_remove_group(&pdev->dev.kobj, &lpss_attr_group);
		dev_pm_domain_set(&pdev->dev, NULL);
		break;
	default:
		break;
	}

	return 0;
}
static struct notifier_block acpi_lpss_nb = {
	.notifier_call = acpi_lpss_platform_notify,
};

static void acpi_lpss_bind(struct device *dev)
{
	struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));

	if (!pdata || !pdata->mmio_base || !(pdata->dev_desc->flags & LPSS_LTR))
		return;

	if (pdata->mmio_size >= pdata->dev_desc->prv_offset + LPSS_LTR_SIZE)
		dev->power.set_latency_tolerance = acpi_lpss_set_ltr;
	else
		dev_err(dev, "MMIO size insufficient to access LTR\n");
}
static void acpi_lpss_unbind(struct device *dev)
{
	dev->power.set_latency_tolerance = NULL;
}

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
	.attach = acpi_lpss_create_device,
	.bind = acpi_lpss_bind,
	.unbind = acpi_lpss_unbind,
};
void __init acpi_lpss_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	ret = lpt_clk_init();
	if (ret)
		return;

	id = x86_match_cpu(lpss_cpu_ids);
	if (id)
		lpss_quirks |= LPSS_QUIRK_ALWAYS_POWER_ON;

	bus_register_notifier(&platform_bus_type, &acpi_lpss_nb);
	acpi_scan_add_handler(&lpss_handler);
}
#else

static struct acpi_scan_handler lpss_handler = {
	.ids = acpi_lpss_device_ids,
};

void __init acpi_lpss_init(void)
{
	acpi_scan_add_handler(&lpss_handler);
}

#endif /* CONFIG_X86_INTEL_LPSS */