// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Core SoC Power Management Controller Driver
 *
 * Copyright (c) 2016, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/tsc.h>

#include "intel_pmc_core.h"
static struct pmc_dev pmc;

/* PKGC MSRs are common across Intel Core SoCs */
static const struct pmc_bit_map msr_map[] = {
	{"Package C2",	MSR_PKG_C2_RESIDENCY},
	{"Package C3",	MSR_PKG_C3_RESIDENCY},
	{"Package C6",	MSR_PKG_C6_RESIDENCY},
	{"Package C7",	MSR_PKG_C7_RESIDENCY},
	{"Package C8",	MSR_PKG_C8_RESIDENCY},
	{"Package C9",	MSR_PKG_C9_RESIDENCY},
	{"Package C10",	MSR_PKG_C10_RESIDENCY},
	{}
};

static const struct pmc_bit_map spt_pll_map[] = {
	{"MIPI PLL",	SPT_PMC_BIT_MPHY_CMN_LANE0},
	{"GEN2 USB2PCIE2 PLL",	SPT_PMC_BIT_MPHY_CMN_LANE1},
	{"DMIPCIE3 PLL",	SPT_PMC_BIT_MPHY_CMN_LANE2},
	{"SATA PLL",	SPT_PMC_BIT_MPHY_CMN_LANE3},
	{},
};

static const struct pmc_bit_map spt_mphy_map[] = {
	{"MPHY CORE LANE 0",	SPT_PMC_BIT_MPHY_LANE0},
	{"MPHY CORE LANE 1",	SPT_PMC_BIT_MPHY_LANE1},
	{"MPHY CORE LANE 2",	SPT_PMC_BIT_MPHY_LANE2},
	{"MPHY CORE LANE 3",	SPT_PMC_BIT_MPHY_LANE3},
	{"MPHY CORE LANE 4",	SPT_PMC_BIT_MPHY_LANE4},
	{"MPHY CORE LANE 5",	SPT_PMC_BIT_MPHY_LANE5},
	{"MPHY CORE LANE 6",	SPT_PMC_BIT_MPHY_LANE6},
	{"MPHY CORE LANE 7",	SPT_PMC_BIT_MPHY_LANE7},
	{"MPHY CORE LANE 8",	SPT_PMC_BIT_MPHY_LANE8},
	{"MPHY CORE LANE 9",	SPT_PMC_BIT_MPHY_LANE9},
	{"MPHY CORE LANE 10",	SPT_PMC_BIT_MPHY_LANE10},
	{"MPHY CORE LANE 11",	SPT_PMC_BIT_MPHY_LANE11},
	{"MPHY CORE LANE 12",	SPT_PMC_BIT_MPHY_LANE12},
	{"MPHY CORE LANE 13",	SPT_PMC_BIT_MPHY_LANE13},
	{"MPHY CORE LANE 14",	SPT_PMC_BIT_MPHY_LANE14},
	{"MPHY CORE LANE 15",	SPT_PMC_BIT_MPHY_LANE15},
	{},
};

static const struct pmc_bit_map spt_pfear_map[] = {
	{"PMC",		SPT_PMC_BIT_PMC},
	{"OPI-DMI",	SPT_PMC_BIT_OPI},
	{"SPI / eSPI",	SPT_PMC_BIT_SPI},
	{"XHCI",	SPT_PMC_BIT_XHCI},
	{"SPA",		SPT_PMC_BIT_SPA},
	{"SPB",		SPT_PMC_BIT_SPB},
	{"SPC",		SPT_PMC_BIT_SPC},
	{"GBE",		SPT_PMC_BIT_GBE},
	{"SATA",	SPT_PMC_BIT_SATA},
	{"HDA-PGD0",	SPT_PMC_BIT_HDA_PGD0},
	{"HDA-PGD1",	SPT_PMC_BIT_HDA_PGD1},
	{"HDA-PGD2",	SPT_PMC_BIT_HDA_PGD2},
	{"HDA-PGD3",	SPT_PMC_BIT_HDA_PGD3},
	{"RSVD",	SPT_PMC_BIT_RSVD_0B},
	{"LPSS",	SPT_PMC_BIT_LPSS},
	{"LPC",		SPT_PMC_BIT_LPC},
	{"SMB",		SPT_PMC_BIT_SMB},
	{"ISH",		SPT_PMC_BIT_ISH},
	{"P2SB",	SPT_PMC_BIT_P2SB},
	{"DFX",		SPT_PMC_BIT_DFX},
	{"SCC",		SPT_PMC_BIT_SCC},
	{"RSVD",	SPT_PMC_BIT_RSVD_0C},
	{"FUSE",	SPT_PMC_BIT_FUSE},
	{"CAMERA",	SPT_PMC_BIT_CAMREA},
	{"RSVD",	SPT_PMC_BIT_RSVD_0D},
	{"USB3-OTG",	SPT_PMC_BIT_USB3_OTG},
	{"EXI",		SPT_PMC_BIT_EXI},
	{"CSE",		SPT_PMC_BIT_CSE},
	{"CSME_KVM",	SPT_PMC_BIT_CSME_KVM},
	{"CSME_PMT",	SPT_PMC_BIT_CSME_PMT},
	{"CSME_CLINK",	SPT_PMC_BIT_CSME_CLINK},
	{"CSME_PTIO",	SPT_PMC_BIT_CSME_PTIO},
	{"CSME_USBR",	SPT_PMC_BIT_CSME_USBR},
	{"CSME_SUSRAM",	SPT_PMC_BIT_CSME_SUSRAM},
	{"CSME_SMT",	SPT_PMC_BIT_CSME_SMT},
	{"RSVD",	SPT_PMC_BIT_RSVD_1A},
	{"CSME_SMS2",	SPT_PMC_BIT_CSME_SMS2},
	{"CSME_SMS1",	SPT_PMC_BIT_CSME_SMS1},
	{"CSME_RTC",	SPT_PMC_BIT_CSME_RTC},
	{"CSME_PSF",	SPT_PMC_BIT_CSME_PSF},
	{},
};

static const struct pmc_bit_map spt_ltr_show_map[] = {
	{"SOUTHPORT_A",		SPT_PMC_LTR_SPA},
	{"SOUTHPORT_B",		SPT_PMC_LTR_SPB},
	{"SATA",		SPT_PMC_LTR_SATA},
	{"GIGABIT_ETHERNET",	SPT_PMC_LTR_GBE},
	{"XHCI",		SPT_PMC_LTR_XHCI},
	{"Reserved",		SPT_PMC_LTR_RESERVED},
	{"ME",			SPT_PMC_LTR_ME},
	/* EVA is Enterprise Value Add, doesn't really exist on PCH */
	{"EVA",			SPT_PMC_LTR_EVA},
	{"SOUTHPORT_C",		SPT_PMC_LTR_SPC},
	{"HD_AUDIO",		SPT_PMC_LTR_AZ},
	{"LPSS",		SPT_PMC_LTR_LPSS},
	{"SOUTHPORT_D",		SPT_PMC_LTR_SPD},
	{"SOUTHPORT_E",		SPT_PMC_LTR_SPE},
	{"CAMERA",		SPT_PMC_LTR_CAM},
	{"ESPI",		SPT_PMC_LTR_ESPI},
	{"SCC",			SPT_PMC_LTR_SCC},
	{"ISH",			SPT_PMC_LTR_ISH},
	/* Below two cannot be used for LTR_IGNORE */
	{"CURRENT_PLATFORM",	SPT_PMC_LTR_CUR_PLT},
	{"AGGREGATED_SYSTEM",	SPT_PMC_LTR_CUR_ASLT},
	{}
};

static const struct pmc_reg_map spt_reg_map = {
	.pfear_sts = spt_pfear_map,
	.mphy_sts = spt_mphy_map,
	.pll_sts = spt_pll_map,
	.ltr_show_sts = spt_ltr_show_map,
	.msr_sts = msr_map,
	.slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
	.ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
	.regmap_length = SPT_PMC_MMIO_REG_LEN,
	.ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
	.ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
	.pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
	.pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
	.ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
	.pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};
/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
static const struct pmc_bit_map cnp_pfear_map[] = {
	/* Reserved for Cannon Lake but valid for Comet Lake */
	{"PMC",		BIT(0)},
	{"OPI-DMI",	BIT(1)},
	{"SPI/eSPI",	BIT(2)},
	{"XHCI",	BIT(3)},
	{"SPA",		BIT(4)},
	{"SPB",		BIT(5)},
	{"SPC",		BIT(6)},
	{"GBE",		BIT(7)},

	{"SATA",	BIT(0)},
	{"HDA_PGD0",	BIT(1)},
	{"HDA_PGD1",	BIT(2)},
	{"HDA_PGD2",	BIT(3)},
	{"HDA_PGD3",	BIT(4)},
	{"SPD",		BIT(5)},
	{"LPSS",	BIT(6)},
	{"LPC",		BIT(7)},

	{"SMB",		BIT(0)},
	{"ISH",		BIT(1)},
	{"P2SB",	BIT(2)},
	{"NPK_VNN",	BIT(3)},
	{"SDX",		BIT(4)},
	{"SPE",		BIT(5)},
	{"Fuse",	BIT(6)},
	/* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
	{"SBR8",	BIT(7)},

	{"CSME_FSC",	BIT(0)},
	{"USB3_OTG",	BIT(1)},
	{"EXI",		BIT(2)},
	{"CSE",		BIT(3)},
	{"CSME_KVM",	BIT(4)},
	{"CSME_PMT",	BIT(5)},
	{"CSME_CLINK",	BIT(6)},
	{"CSME_PTIO",	BIT(7)},

	{"CSME_USBR",	BIT(0)},
	{"CSME_SUSRAM",	BIT(1)},
	{"CSME_SMT1",	BIT(2)},
	{"CSME_SMT4",	BIT(3)},
	{"CSME_SMS2",	BIT(4)},
	{"CSME_SMS1",	BIT(5)},
	{"CSME_RTC",	BIT(6)},
	{"CSME_PSF",	BIT(7)},

	{"SBR0",	BIT(0)},
	{"SBR1",	BIT(1)},
	{"SBR2",	BIT(2)},
	{"SBR3",	BIT(3)},
	{"SBR4",	BIT(4)},
	{"SBR5",	BIT(5)},
	{"CSME_PECI",	BIT(6)},
	{"PSF1",	BIT(7)},

	{"PSF2",	BIT(0)},
	{"PSF3",	BIT(1)},
	{"PSF4",	BIT(2)},
	{"CNVI",	BIT(3)},
	{"UFS0",	BIT(4)},
	{"EMMC",	BIT(5)},
	{"SPF",		BIT(6)},
	{"SBR6",	BIT(7)},

	{"SBR7",	BIT(0)},
	{"NPK_AON",	BIT(1)},
	{"HDA_PGD4",	BIT(2)},
	{"HDA_PGD5",	BIT(3)},
	{"HDA_PGD6",	BIT(4)},
	/* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */
	{"PSF6",	BIT(5)},
	{"PSF7",	BIT(6)},
	{"PSF8",	BIT(7)},

	/* Ice Lake generation onwards only */
	{"RES_65",	BIT(0)},
	{"RES_66",	BIT(1)},
	{"RES_67",	BIT(2)},
	{"TAM",		BIT(3)},
	{"GBETSN",	BIT(4)},
	{"TBTLSX",	BIT(5)},
	{"RES_71",	BIT(6)},
	{"RES_72",	BIT(7)},
	{}
};

static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
	{"AUDIO_D3",	BIT(0)},
	{"OTG_D3",	BIT(1)},
	{"XHCI_D3",	BIT(2)},
	{"LPIO_D3",	BIT(3)},
	{"SDX_D3",	BIT(4)},
	{"SATA_D3",	BIT(5)},
	{"UFS0_D3",	BIT(6)},
	{"UFS1_D3",	BIT(7)},
	{"EMMC_D3",	BIT(8)},
	{}
};

static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
	{"SDIO_PLL_OFF",	BIT(0)},
	{"USB2_PLL_OFF",	BIT(1)},
	{"AUDIO_PLL_OFF",	BIT(2)},
	{"OC_PLL_OFF",		BIT(3)},
	{"MAIN_PLL_OFF",	BIT(4)},
	{"XOSC_OFF",		BIT(5)},
	{"LPC_CLKS_GATED",	BIT(6)},
	{"PCIE_CLKREQS_IDLE",	BIT(7)},
	{"AUDIO_ROSC_OFF",	BIT(8)},
	{"HPET_XOSC_CLK_REQ",	BIT(9)},
	{"PMC_ROSC_SLOW_CLK",	BIT(10)},
	{"AON2_ROSC_GATED",	BIT(11)},
	{"CLKACKS_DEASSERTED",	BIT(12)},
	{}
};

static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
	{"MPHY_CORE_GATED",	BIT(0)},
	{"CSME_GATED",		BIT(1)},
	{"USB2_SUS_GATED",	BIT(2)},
	{"DYN_FLEX_IO_IDLE",	BIT(3)},
	{"GBE_NO_LINK",		BIT(4)},
	{"THERM_SEN_DISABLED",	BIT(5)},
	{"PCIE_LOW_POWER",	BIT(6)},
	{"ISH_VNNAON_REQ_ACT",	BIT(7)},
	{"ISH_VNN_REQ_ACT",	BIT(8)},
	{"CNV_VNNAON_REQ_ACT",	BIT(9)},
	{"CNV_VNN_REQ_ACT",	BIT(10)},
	{"NPK_VNNON_REQ_ACT",	BIT(11)},
	{"PMSYNC_STATE_IDLE",	BIT(12)},
	{"ALST_GT_THRES",	BIT(13)},
	{"PMC_ARC_PG_READY",	BIT(14)},
	{}
};

static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
	cnp_slps0_dbg0_map,
	cnp_slps0_dbg1_map,
	cnp_slps0_dbg2_map,
	NULL
};
static const struct pmc_bit_map cnp_ltr_show_map[] = {
	{"SOUTHPORT_A",		CNP_PMC_LTR_SPA},
	{"SOUTHPORT_B",		CNP_PMC_LTR_SPB},
	{"SATA",		CNP_PMC_LTR_SATA},
	{"GIGABIT_ETHERNET",	CNP_PMC_LTR_GBE},
	{"XHCI",		CNP_PMC_LTR_XHCI},
	{"Reserved",		CNP_PMC_LTR_RESERVED},
	{"ME",			CNP_PMC_LTR_ME},
	/* EVA is Enterprise Value Add, doesn't really exist on PCH */
	{"EVA",			CNP_PMC_LTR_EVA},
	{"SOUTHPORT_C",		CNP_PMC_LTR_SPC},
	{"HD_AUDIO",		CNP_PMC_LTR_AZ},
	{"CNV",			CNP_PMC_LTR_CNV},
	{"LPSS",		CNP_PMC_LTR_LPSS},
	{"SOUTHPORT_D",		CNP_PMC_LTR_SPD},
	{"SOUTHPORT_E",		CNP_PMC_LTR_SPE},
	{"CAMERA",		CNP_PMC_LTR_CAM},
	{"ESPI",		CNP_PMC_LTR_ESPI},
	{"SCC",			CNP_PMC_LTR_SCC},
	{"ISH",			CNP_PMC_LTR_ISH},
	{"UFSX2",		CNP_PMC_LTR_UFSX2},
	{"EMMC",		CNP_PMC_LTR_EMMC},
	/* Reserved for Cannon Lake but valid for Ice Lake */
	{"WIGIG",		ICL_PMC_LTR_WIGIG},
	/* Below two cannot be used for LTR_IGNORE */
	{"CURRENT_PLATFORM",	CNP_PMC_LTR_CUR_PLT},
	{"AGGREGATED_SYSTEM",	CNP_PMC_LTR_CUR_ASLT},
	{}
};

static const struct pmc_reg_map cnp_reg_map = {
	.pfear_sts = cnp_pfear_map,
	.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
	.slps0_dbg_maps = cnp_slps0_dbg_maps,
	.ltr_show_sts = cnp_ltr_show_map,
	.msr_sts = msr_map,
	.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
	.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
	.regmap_length = CNP_PMC_MMIO_REG_LEN,
	.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
	.ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
	.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
	.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
	.ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
};

static const struct pmc_reg_map icl_reg_map = {
	.pfear_sts = cnp_pfear_map,
	.slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
	.slps0_dbg_maps = cnp_slps0_dbg_maps,
	.ltr_show_sts = cnp_ltr_show_map,
	.msr_sts = msr_map,
	.slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
	.ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
	.regmap_length = CNP_PMC_MMIO_REG_LEN,
	.ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
	.ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
	.pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
	.pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
	.ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
};
static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
{
	return readb(pmcdev->regbase + offset);
}

static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
	return readl(pmcdev->regbase + reg_offset);
}

static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
				      u32 val)
{
	writel(val, pmcdev->regbase + reg_offset);
}

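/*
 * The SLP_S0 residency counter ticks in hardware-specific units; multiply by
 * the per-generation counter step to report the value in microseconds (see
 * the slp_s0_residency_usec debugfs file created below).
 */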
static inline u64 pmc_core_adjust_slp_s0_step(u32 value)
{
	return (u64)value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
}

static int pmc_core_dev_state_get(void *data, u64 *val)
{
	struct pmc_dev *pmcdev = data;
	const struct pmc_reg_map *map = pmcdev->map;
	u32 value;

	value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
	*val = pmc_core_adjust_slp_s0_step(value);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");

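/*
 * Returns non-zero when the BIOS has set the PMC read-disable bit, in which
 * case the mailbox-backed debugfs dumps (MPHY/PLL status) are unavailable.
 */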
static int pmc_core_check_read_lock_bit(void)
{
	struct pmc_dev *pmcdev = &pmc;
	u32 value;

	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
	return value & BIT(pmcdev->map->pm_read_disable_bit);
}

#if IS_ENABLED(CONFIG_DEBUG_FS)
static bool slps0_dbg_latch;

static void pmc_core_display_map(struct seq_file *s, int index,
				 u8 pf_reg, const struct pmc_bit_map *pf_map)
{
	seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
		   index, pf_map[index].name,
		   pf_map[index].bit_mask & pf_reg ? "Off" : "On");
}

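/*
 * Dump the PGD PFET Enable Ack Status (PPFEAR) registers: a set bit is
 * reported as "Off" (power gated), a clear bit as "On".
 */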
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->pfear_sts;
	u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
	int index, iter;

	iter = pmcdev->map->ppfear0_offset;

	for (index = 0; index < pmcdev->map->ppfear_buckets &&
	     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
		pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);

	for (index = 0; map[index].name &&
	     index < pmcdev->map->ppfear_buckets * 8; index++)
		pmc_core_display_map(s, index, pf_regs[index / 8], map);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);

/* This function should return link status, 0 means ready */
static int pmc_core_mtpmc_link_status(void)
{
	struct pmc_dev *pmcdev = &pmc;
	u32 value;

	value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
	return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
}

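/*
 * Post a read request to the PMC mailbox: wait for the MTPMC link to become
 * free, then write the XRAM address to MTPMC. The caller reads the reply
 * back from MFPMC after a short delay.
 */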
static int pmc_core_send_msg(u32 *addr_xram)
{
	struct pmc_dev *pmcdev = &pmc;
	u32 dest;
	int timeout;

	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
		if (pmc_core_mtpmc_link_status() == 0)
			break;
		msleep(5);
	}

	if (timeout <= 0 && pmc_core_mtpmc_link_status())
		return -EBUSY;

	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
	pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
	return 0;
}

static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
	u32 mphy_core_reg_low, mphy_core_reg_high;
	u32 val_low, val_high;
	int index, err = 0;

	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);

	mutex_lock(&pmcdev->lock);

	if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	msleep(10);
	val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

	if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	msleep(10);
	val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; map[index].name && index < 8; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_low ? "Not power gated" :
			   "Power gated");
	}

	for (index = 8; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_high ? "Not power gated" :
			   "Power gated");
	}

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);

static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->pll_sts;
	u32 mphy_common_reg, val;
	int index, err = 0;

	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
	mutex_lock(&pmcdev->lock);

	if (pmc_core_send_msg(&mphy_common_reg) != 0) {
		err = -EBUSY;
		goto out_unlock;
	}

	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
	msleep(10);
	val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val ? "Active" : "Idle");
	}

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);

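/*
 * Writing an IP index (as listed by ltr_show) to the ltr_ignore debugfs file
 * sets the corresponding bit in the LTR_IGNORE register so that the PMC
 * disregards that IP's latency tolerance reports.
 */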
static ssize_t pmc_core_ltr_ignore_write(struct file *file,
					 const char __user *userbuf,
					 size_t count, loff_t *ppos)
{
	struct pmc_dev *pmcdev = &pmc;
	const struct pmc_reg_map *map = pmcdev->map;
	u32 val, buf_size, fd;
	int err = 0;

	buf_size = count < 64 ? count : 64;
	mutex_lock(&pmcdev->lock);

	if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) {
		err = -EFAULT;
		goto out_unlock;
	}

	if (val > map->ltr_ignore_max) {
		err = -EINVAL;
		goto out_unlock;
	}

	fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
	fd |= (1U << val);
	pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);

out_unlock:
	mutex_unlock(&pmcdev->lock);
	return err == 0 ? count : err;
}

static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
	return 0;
}

static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
}

static const struct file_operations pmc_core_ltr_ignore_ops = {
	.open = pmc_core_ltr_ignore_open,
	.read = seq_read,
	.write = pmc_core_ltr_ignore_write,
	.llseek = seq_lseek,
	.release = single_release,
};

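/*
 * Enable or disable latching of the SLP_S0 debug status registers. With
 * @reset true the latch bit is cleared; otherwise it is set only when the
 * slp_s0_dbg_latch debugfs knob has been armed, and the knob is consumed.
 */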
static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
	const struct pmc_reg_map *map = pmcdev->map;
	u32 fd;

	mutex_lock(&pmcdev->lock);

	if (!reset && !slps0_dbg_latch)
		goto out_unlock;

	fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset);
	if (reset)
		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
	else
		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
	pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd);

	slps0_dbg_latch = 0;

out_unlock:
	mutex_unlock(&pmcdev->lock);
}

static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
	const struct pmc_bit_map *map;
	int offset;
	u32 data;

	pmc_core_slps0_dbg_latch(pmcdev, false);
	offset = pmcdev->map->slps0_dbg_offset;
	while (*maps) {
		map = *maps;
		data = pmc_core_reg_read(pmcdev, offset);
		offset += 4;
		while (map->name) {
			seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
				   map->name,
				   data & map->bit_mask ? "Yes" : "No");
			++map;
		}
		++maps;
	}
	pmc_core_slps0_dbg_latch(pmcdev, true);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);

static u32 convert_ltr_scale(u32 val)
{
	/*
	 * As per PCIE specification supporting document
	 * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
	 * Tolerance Reporting data payload is encoded in a
	 * 3 bit scale and 10 bit value fields. Values are
	 * multiplied by the indicated scale to yield an absolute time
	 * value, expressible in a range from 1 nanosecond to
	 * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
	 *
	 * scale encoding is as follows:
	 *
	 * ----------------------------------------------
	 * |scale factor	|	Multiplier (ns)	|
	 * ----------------------------------------------
	 * |	0		|	1		|
	 * |	1		|	32		|
	 * |	2		|	1024		|
	 * |	3		|	32768		|
	 * |	4		|	1048576		|
	 * |	5		|	33554432	|
	 * |	6		|	Invalid		|
	 * |	7		|	Invalid		|
	 * ----------------------------------------------
	 */
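	/* Example: scale = 2, value = 100 -> 100 * 1024 ns = 102400 ns. */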
	if (val > 5) {
		pr_warn("Invalid LTR scale factor.\n");
		return 0;
	}

	return 1U << (5 * val);
}

static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
	u32 ltr_raw_data, scale, val;
	u16 snoop_ltr, nonsnoop_ltr;
	int index;

	for (index = 0; map[index].name; index++) {
		decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
		ltr_raw_data = pmc_core_reg_read(pmcdev,
						 map[index].bit_mask);
		snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
		nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

		if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
			scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
			val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
			decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
		}

		if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
			scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
			val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
			decoded_snoop_ltr = val * convert_ltr_scale(scale);
		}

		seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
			   map[index].name, ltr_raw_data,
			   decoded_non_snoop_ltr,
			   decoded_snoop_ltr);
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);

static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	const struct pmc_bit_map *map = pmcdev->map->msr_sts;
	u64 pcstate_count;
	int index;

	for (index = 0; map[index].name; index++) {
		if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
			continue;

		pcstate_count *= 1000;
		do_div(pcstate_count, tsc_khz);
		seq_printf(s, "%-8s : %llu\n", map[index].name,
			   pcstate_count);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);

static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
	debugfs_remove_recursive(pmcdev->dbgfs_dir);
}

static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
	struct dentry *dir;

	dir = debugfs_create_dir("pmc_core", NULL);
	pmcdev->dbgfs_dir = dir;

	debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
			    &pmc_core_dev_state);

	debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev,
			    &pmc_core_ppfear_fops);

	debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
			    &pmc_core_ltr_ignore_ops);

	debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);

	debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
			    &pmc_core_pkgc_fops);

	if (pmcdev->map->pll_sts)
		debugfs_create_file("pll_status", 0444, dir, pmcdev,
				    &pmc_core_pll_fops);

	if (pmcdev->map->mphy_sts)
		debugfs_create_file("mphy_core_lanes_power_gating_status",
				    0444, dir, pmcdev,
				    &pmc_core_mphy_pg_fops);

	if (pmcdev->map->slps0_dbg_maps) {
		debugfs_create_file("slp_s0_debug_status", 0444,
				    dir, pmcdev,
				    &pmc_core_slps0_dbg_fops);

		debugfs_create_bool("slp_s0_dbg_latch", 0644,
				    dir, &slps0_dbg_latch);
	}
}
#else
static inline void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
}

static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
}
#endif /* CONFIG_DEBUG_FS */

static const struct x86_cpu_id intel_pmc_core_ids[] = {
	INTEL_CPU_FAM6(SKYLAKE_L, spt_reg_map),
	INTEL_CPU_FAM6(SKYLAKE, spt_reg_map),
	INTEL_CPU_FAM6(KABYLAKE_L, spt_reg_map),
	INTEL_CPU_FAM6(KABYLAKE, spt_reg_map),
	INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
	INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
	INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
	INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
	INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
	{}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);

static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0},
	{ 0, },
};

/*
 * This quirk can be used on those platforms where the platform BIOS enforces
 * 24MHz crystal shutdown before the PMC can assert SLP_S0#.
 */
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	struct pmc_dev *pmcdev = &pmc;
	u32 value;

	value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
	/* 24MHz Crystal Shutdown Qualification Disable */
	value |= SPT_PMC_VRIC1_XTALSDQDIS;
	/* Low Voltage Mode Enable */
	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
	pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
	return 0;
}

static const struct dmi_system_id pmc_core_dmi_table[] = {
	{
		.callback = quirk_xtal_ignore,
		.ident = "HP Elite x2 1013 G3",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
		},
	},
	{}
};

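/*
 * On platforms that expose a valid ACPI LPIT, the SLP_S0 residency counter
 * address is discovered at runtime; otherwise fall back to the default PMC
 * base address, provided it is not backed by RAM.
 */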
static int pmc_core_probe(struct platform_device *pdev)
{
	static bool device_initialized;
	struct pmc_dev *pmcdev = &pmc;
	const struct x86_cpu_id *cpu_id;
	u64 slp_s0_addr;

	if (device_initialized)
		return -ENODEV;

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;

	/*
	 * Coffee Lake has the CPU ID of Kaby Lake but is paired with a Cannon
	 * Lake PCH, so the Sunrisepoint PCH regmap can't be used there. Use
	 * the Cannon Lake PCH regmap in that case.
	 */
	if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
		pmcdev->map = &cnp_reg_map;

	if (lpit_read_residency_count_address(&slp_s0_addr)) {
		pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;

		if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
			return -ENODEV;
	} else {
		pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
	}

	pmcdev->regbase = ioremap(pmcdev->base_addr,
				  pmcdev->map->regmap_length);
	if (!pmcdev->regbase)
		return -ENOMEM;

	mutex_init(&pmcdev->lock);
	platform_set_drvdata(pdev, pmcdev);
	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
	dmi_check_system(pmc_core_dmi_table);

	pmc_core_dbgfs_register(pmcdev);

	device_initialized = true;
	dev_info(&pdev->dev, " initialized\n");

	return 0;
}
static int pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

	pmc_core_dbgfs_unregister(pmcdev);
	platform_set_drvdata(pdev, NULL);
	mutex_destroy(&pmcdev->lock);
	iounmap(pmcdev->regbase);
	return 0;
}

#ifdef CONFIG_PM_SLEEP

static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");

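/*
 * Snapshot the PC10 and SLP_S0 residency counters at suspend time so that
 * pmc_core_resume() can tell whether the platform actually entered S0ix.
 */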
static int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);

	pmcdev->check_counters = false;

	/* No warnings on S0ix failures */
	if (!warn_on_s0ix_failures)
		return 0;

	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PC10 residency for checking later */
	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
		return -EIO;

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
		return -EIO;

	pmcdev->check_counters = true;
	return 0;
}

static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
{
	u64 pc10_counter;

	if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
		return false;

	if (pc10_counter == pmcdev->pc10_counter)
		return true;

	return false;
}

static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
	u64 s0ix_counter;

	if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
		return false;

	if (s0ix_counter == pmcdev->s0ix_counter)
		return true;

	return false;
}

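/*
 * If the SLP_S0 counter did not advance across suspend, warn and dump the
 * SLP_S0 debug status registers to help identify which IP blocked S0ix entry.
 */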
static int pmc_core_resume(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
	int offset = pmcdev->map->slps0_dbg_offset;
	const struct pmc_bit_map *map;
	u32 data;

	if (!pmcdev->check_counters)
		return 0;

	if (!pmc_core_is_s0ix_failed(pmcdev))
		return 0;

	if (pmc_core_is_pc10_failed(pmcdev)) {
		/* S0ix failed because of PC10 entry failure */
		dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
			 pmcdev->pc10_counter);
		return 0;
	}

	/* The really interesting case: S0ix failed - let's ask the PMC why. */
	dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
		 pmcdev->s0ix_counter);
	while (*maps) {
		map = *maps;
		data = pmc_core_reg_read(pmcdev, offset);
		offset += 4;
		while (map->name) {
			dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
				map->name,
				data & map->bit_mask ? "Yes" : "No");
			map++;
		}
		maps++;
	}
	return 0;
}

#endif

static const struct dev_pm_ops pmc_core_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
	{}
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);

static struct platform_driver pmc_core_driver = {
	.driver = {
		.name = "intel_pmc_core",
		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
		.pm = &pmc_core_pm_ops,
	},
	.probe = pmc_core_probe,
	.remove = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");