drivers/platform/x86/intel_pmc_core.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Core SoC Power Management Controller Driver
 *
 * Copyright (c) 2016, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/tsc.h>

#include "intel_pmc_core.h"

static struct pmc_dev pmc;

/* PKGC MSRs are common across Intel Core SoCs */
static const struct pmc_bit_map msr_map[] = {
        {"Package C2", MSR_PKG_C2_RESIDENCY},
        {"Package C3", MSR_PKG_C3_RESIDENCY},
        {"Package C6", MSR_PKG_C6_RESIDENCY},
        {"Package C7", MSR_PKG_C7_RESIDENCY},
        {"Package C8", MSR_PKG_C8_RESIDENCY},
        {"Package C9", MSR_PKG_C9_RESIDENCY},
        {"Package C10", MSR_PKG_C10_RESIDENCY},
        {}
};

static const struct pmc_bit_map spt_pll_map[] = {
        {"MIPI PLL", SPT_PMC_BIT_MPHY_CMN_LANE0},
        {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1},
        {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2},
        {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3},
        {}
};

static const struct pmc_bit_map spt_mphy_map[] = {
        {"MPHY CORE LANE 0", SPT_PMC_BIT_MPHY_LANE0},
        {"MPHY CORE LANE 1", SPT_PMC_BIT_MPHY_LANE1},
        {"MPHY CORE LANE 2", SPT_PMC_BIT_MPHY_LANE2},
        {"MPHY CORE LANE 3", SPT_PMC_BIT_MPHY_LANE3},
        {"MPHY CORE LANE 4", SPT_PMC_BIT_MPHY_LANE4},
        {"MPHY CORE LANE 5", SPT_PMC_BIT_MPHY_LANE5},
        {"MPHY CORE LANE 6", SPT_PMC_BIT_MPHY_LANE6},
        {"MPHY CORE LANE 7", SPT_PMC_BIT_MPHY_LANE7},
        {"MPHY CORE LANE 8", SPT_PMC_BIT_MPHY_LANE8},
        {"MPHY CORE LANE 9", SPT_PMC_BIT_MPHY_LANE9},
        {"MPHY CORE LANE 10", SPT_PMC_BIT_MPHY_LANE10},
        {"MPHY CORE LANE 11", SPT_PMC_BIT_MPHY_LANE11},
        {"MPHY CORE LANE 12", SPT_PMC_BIT_MPHY_LANE12},
        {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13},
        {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14},
        {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15},
        {}
};

static const struct pmc_bit_map spt_pfear_map[] = {
        {"PMC", SPT_PMC_BIT_PMC},
        {"OPI-DMI", SPT_PMC_BIT_OPI},
        {"SPI / eSPI", SPT_PMC_BIT_SPI},
        {"XHCI", SPT_PMC_BIT_XHCI},
        {"SPA", SPT_PMC_BIT_SPA},
        {"SPB", SPT_PMC_BIT_SPB},
        {"SPC", SPT_PMC_BIT_SPC},
        {"GBE", SPT_PMC_BIT_GBE},
        {"SATA", SPT_PMC_BIT_SATA},
        {"HDA-PGD0", SPT_PMC_BIT_HDA_PGD0},
        {"HDA-PGD1", SPT_PMC_BIT_HDA_PGD1},
        {"HDA-PGD2", SPT_PMC_BIT_HDA_PGD2},
        {"HDA-PGD3", SPT_PMC_BIT_HDA_PGD3},
        {"RSVD", SPT_PMC_BIT_RSVD_0B},
        {"LPSS", SPT_PMC_BIT_LPSS},
        {"LPC", SPT_PMC_BIT_LPC},
        {"SMB", SPT_PMC_BIT_SMB},
        {"ISH", SPT_PMC_BIT_ISH},
        {"P2SB", SPT_PMC_BIT_P2SB},
        {"DFX", SPT_PMC_BIT_DFX},
        {"SCC", SPT_PMC_BIT_SCC},
        {"RSVD", SPT_PMC_BIT_RSVD_0C},
        {"FUSE", SPT_PMC_BIT_FUSE},
        {"CAMERA", SPT_PMC_BIT_CAMREA},
        {"RSVD", SPT_PMC_BIT_RSVD_0D},
        {"USB3-OTG", SPT_PMC_BIT_USB3_OTG},
        {"EXI", SPT_PMC_BIT_EXI},
        {"CSE", SPT_PMC_BIT_CSE},
        {"CSME_KVM", SPT_PMC_BIT_CSME_KVM},
        {"CSME_PMT", SPT_PMC_BIT_CSME_PMT},
        {"CSME_CLINK", SPT_PMC_BIT_CSME_CLINK},
        {"CSME_PTIO", SPT_PMC_BIT_CSME_PTIO},
        {"CSME_USBR", SPT_PMC_BIT_CSME_USBR},
        {"CSME_SUSRAM", SPT_PMC_BIT_CSME_SUSRAM},
        {"CSME_SMT", SPT_PMC_BIT_CSME_SMT},
        {"RSVD", SPT_PMC_BIT_RSVD_1A},
        {"CSME_SMS2", SPT_PMC_BIT_CSME_SMS2},
        {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1},
        {"CSME_RTC", SPT_PMC_BIT_CSME_RTC},
        {"CSME_PSF", SPT_PMC_BIT_CSME_PSF},
        {}
};

static const struct pmc_bit_map *ext_spt_pfear_map[] = {
        spt_pfear_map,
        NULL
};

static const struct pmc_bit_map spt_ltr_show_map[] = {
        {"SOUTHPORT_A", SPT_PMC_LTR_SPA},
        {"SOUTHPORT_B", SPT_PMC_LTR_SPB},
        {"SATA", SPT_PMC_LTR_SATA},
        {"GIGABIT_ETHERNET", SPT_PMC_LTR_GBE},
        {"XHCI", SPT_PMC_LTR_XHCI},
        {"Reserved", SPT_PMC_LTR_RESERVED},
        {"ME", SPT_PMC_LTR_ME},
        /* EVA is Enterprise Value Add, doesn't really exist on PCH */
        {"EVA", SPT_PMC_LTR_EVA},
        {"SOUTHPORT_C", SPT_PMC_LTR_SPC},
        {"HD_AUDIO", SPT_PMC_LTR_AZ},
        {"LPSS", SPT_PMC_LTR_LPSS},
        {"SOUTHPORT_D", SPT_PMC_LTR_SPD},
        {"SOUTHPORT_E", SPT_PMC_LTR_SPE},
        {"CAMERA", SPT_PMC_LTR_CAM},
        {"ESPI", SPT_PMC_LTR_ESPI},
        {"SCC", SPT_PMC_LTR_SCC},
        {"ISH", SPT_PMC_LTR_ISH},
        /* Below two cannot be used for LTR_IGNORE */
        {"CURRENT_PLATFORM", SPT_PMC_LTR_CUR_PLT},
        {"AGGREGATED_SYSTEM", SPT_PMC_LTR_CUR_ASLT},
        {}
};

static const struct pmc_reg_map spt_reg_map = {
        .pfear_sts = ext_spt_pfear_map,
        .mphy_sts = spt_mphy_map,
        .pll_sts = spt_pll_map,
        .ltr_show_sts = spt_ltr_show_map,
        .msr_sts = msr_map,
        .slp_s0_offset = SPT_PMC_SLP_S0_RES_COUNTER_OFFSET,
        .ltr_ignore_offset = SPT_PMC_LTR_IGNORE_OFFSET,
        .regmap_length = SPT_PMC_MMIO_REG_LEN,
        .ppfear0_offset = SPT_PMC_XRAM_PPFEAR0A,
        .ppfear_buckets = SPT_PPFEAR_NUM_ENTRIES,
        .pm_cfg_offset = SPT_PMC_PM_CFG_OFFSET,
        .pm_read_disable_bit = SPT_PMC_READ_DISABLE_BIT,
        .ltr_ignore_max = SPT_NUM_IP_IGN_ALLOWED,
        .pm_vric1_offset = SPT_PMC_VRIC1_OFFSET,
};

/* Cannon Lake: PGD PFET Enable Ack Status Register(s) bitmap */
static const struct pmc_bit_map cnp_pfear_map[] = {
        /* Reserved for Cannon Lake but valid for Comet Lake */
        {"PMC", BIT(0)},
        {"OPI-DMI", BIT(1)},
        {"SPI/eSPI", BIT(2)},
        {"XHCI", BIT(3)},
        {"SPA", BIT(4)},
        {"SPB", BIT(5)},
        {"SPC", BIT(6)},
        {"GBE", BIT(7)},

        {"SATA", BIT(0)},
        {"HDA_PGD0", BIT(1)},
        {"HDA_PGD1", BIT(2)},
        {"HDA_PGD2", BIT(3)},
        {"HDA_PGD3", BIT(4)},
        {"SPD", BIT(5)},
        {"LPSS", BIT(6)},
        {"LPC", BIT(7)},

        {"SMB", BIT(0)},
        {"ISH", BIT(1)},
        {"P2SB", BIT(2)},
        {"NPK_VNN", BIT(3)},
        {"SDX", BIT(4)},
        {"SPE", BIT(5)},
        {"Fuse", BIT(6)},
        /*
         * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
         * Tiger Lake and Elkhart Lake.
         */
        {"SBR8", BIT(7)},

        {"CSME_FSC", BIT(0)},
        {"USB3_OTG", BIT(1)},
        {"EXI", BIT(2)},
        {"CSE", BIT(3)},
        {"CSME_KVM", BIT(4)},
        {"CSME_PMT", BIT(5)},
        {"CSME_CLINK", BIT(6)},
        {"CSME_PTIO", BIT(7)},

        {"CSME_USBR", BIT(0)},
        {"CSME_SUSRAM", BIT(1)},
        {"CSME_SMT1", BIT(2)},
        {"CSME_SMT4", BIT(3)},
        {"CSME_SMS2", BIT(4)},
        {"CSME_SMS1", BIT(5)},
        {"CSME_RTC", BIT(6)},
        {"CSME_PSF", BIT(7)},

        {"SBR0", BIT(0)},
        {"SBR1", BIT(1)},
        {"SBR2", BIT(2)},
        {"SBR3", BIT(3)},
        {"SBR4", BIT(4)},
        {"SBR5", BIT(5)},
        {"CSME_PECI", BIT(6)},
        {"PSF1", BIT(7)},

        {"PSF2", BIT(0)},
        {"PSF3", BIT(1)},
        {"PSF4", BIT(2)},
        {"CNVI", BIT(3)},
        {"UFS0", BIT(4)},
        {"EMMC", BIT(5)},
        {"SPF", BIT(6)},
        {"SBR6", BIT(7)},

        {"SBR7", BIT(0)},
        {"NPK_AON", BIT(1)},
        {"HDA_PGD4", BIT(2)},
        {"HDA_PGD5", BIT(3)},
        {"HDA_PGD6", BIT(4)},
        /*
         * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake,
         * Tiger Lake and Elkhart Lake.
         */
        {"PSF6", BIT(5)},
        {"PSF7", BIT(6)},
        {"PSF8", BIT(7)},
        {}
};

static const struct pmc_bit_map *ext_cnp_pfear_map[] = {
        cnp_pfear_map,
        NULL
};

static const struct pmc_bit_map icl_pfear_map[] = {
        /* Ice Lake generation onwards only */
        {"RES_65", BIT(0)},
        {"RES_66", BIT(1)},
        {"RES_67", BIT(2)},
        {"TAM", BIT(3)},
        {"GBETSN", BIT(4)},
        {"TBTLSX", BIT(5)},
        {"RES_71", BIT(6)},
        {"RES_72", BIT(7)},
        {}
};

static const struct pmc_bit_map *ext_icl_pfear_map[] = {
        cnp_pfear_map,
        icl_pfear_map,
        NULL
};

static const struct pmc_bit_map tgl_pfear_map[] = {
        /* Tiger Lake and Elkhart Lake generation onwards only */
        {"PSF9", BIT(0)},
        {"RES_66", BIT(1)},
        {"RES_67", BIT(2)},
        {"RES_68", BIT(3)},
        {"RES_69", BIT(4)},
        {"RES_70", BIT(5)},
        {"TBTLSX", BIT(6)},
        {}
};

static const struct pmc_bit_map *ext_tgl_pfear_map[] = {
        cnp_pfear_map,
        tgl_pfear_map,
        NULL
};

static const struct pmc_bit_map cnp_slps0_dbg0_map[] = {
        {"AUDIO_D3", BIT(0)},
        {"OTG_D3", BIT(1)},
        {"XHCI_D3", BIT(2)},
        {"LPIO_D3", BIT(3)},
        {"SDX_D3", BIT(4)},
        {"SATA_D3", BIT(5)},
        {"UFS0_D3", BIT(6)},
        {"UFS1_D3", BIT(7)},
        {"EMMC_D3", BIT(8)},
        {}
};

static const struct pmc_bit_map cnp_slps0_dbg1_map[] = {
        {"SDIO_PLL_OFF", BIT(0)},
        {"USB2_PLL_OFF", BIT(1)},
        {"AUDIO_PLL_OFF", BIT(2)},
        {"OC_PLL_OFF", BIT(3)},
        {"MAIN_PLL_OFF", BIT(4)},
        {"XOSC_OFF", BIT(5)},
        {"LPC_CLKS_GATED", BIT(6)},
        {"PCIE_CLKREQS_IDLE", BIT(7)},
        {"AUDIO_ROSC_OFF", BIT(8)},
        {"HPET_XOSC_CLK_REQ", BIT(9)},
        {"PMC_ROSC_SLOW_CLK", BIT(10)},
        {"AON2_ROSC_GATED", BIT(11)},
        {"CLKACKS_DEASSERTED", BIT(12)},
        {}
};

static const struct pmc_bit_map cnp_slps0_dbg2_map[] = {
        {"MPHY_CORE_GATED", BIT(0)},
        {"CSME_GATED", BIT(1)},
        {"USB2_SUS_GATED", BIT(2)},
        {"DYN_FLEX_IO_IDLE", BIT(3)},
        {"GBE_NO_LINK", BIT(4)},
        {"THERM_SEN_DISABLED", BIT(5)},
        {"PCIE_LOW_POWER", BIT(6)},
        {"ISH_VNNAON_REQ_ACT", BIT(7)},
        {"ISH_VNN_REQ_ACT", BIT(8)},
        {"CNV_VNNAON_REQ_ACT", BIT(9)},
        {"CNV_VNN_REQ_ACT", BIT(10)},
        {"NPK_VNNON_REQ_ACT", BIT(11)},
        {"PMSYNC_STATE_IDLE", BIT(12)},
        {"ALST_GT_THRES", BIT(13)},
        {"PMC_ARC_PG_READY", BIT(14)},
        {}
};

static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = {
        cnp_slps0_dbg0_map,
        cnp_slps0_dbg1_map,
        cnp_slps0_dbg2_map,
        NULL
};

static const struct pmc_bit_map cnp_ltr_show_map[] = {
        {"SOUTHPORT_A", CNP_PMC_LTR_SPA},
        {"SOUTHPORT_B", CNP_PMC_LTR_SPB},
        {"SATA", CNP_PMC_LTR_SATA},
        {"GIGABIT_ETHERNET", CNP_PMC_LTR_GBE},
        {"XHCI", CNP_PMC_LTR_XHCI},
        {"Reserved", CNP_PMC_LTR_RESERVED},
        {"ME", CNP_PMC_LTR_ME},
        /* EVA is Enterprise Value Add, doesn't really exist on PCH */
        {"EVA", CNP_PMC_LTR_EVA},
        {"SOUTHPORT_C", CNP_PMC_LTR_SPC},
        {"HD_AUDIO", CNP_PMC_LTR_AZ},
        {"CNV", CNP_PMC_LTR_CNV},
        {"LPSS", CNP_PMC_LTR_LPSS},
        {"SOUTHPORT_D", CNP_PMC_LTR_SPD},
        {"SOUTHPORT_E", CNP_PMC_LTR_SPE},
        {"CAMERA", CNP_PMC_LTR_CAM},
        {"ESPI", CNP_PMC_LTR_ESPI},
        {"SCC", CNP_PMC_LTR_SCC},
        {"ISH", CNP_PMC_LTR_ISH},
        {"UFSX2", CNP_PMC_LTR_UFSX2},
        {"EMMC", CNP_PMC_LTR_EMMC},
        /* Reserved for Cannon Lake but valid for Ice Lake */
        {"WIGIG", ICL_PMC_LTR_WIGIG},
        /* Below two cannot be used for LTR_IGNORE */
        {"CURRENT_PLATFORM", CNP_PMC_LTR_CUR_PLT},
        {"AGGREGATED_SYSTEM", CNP_PMC_LTR_CUR_ASLT},
        {}
};

static const struct pmc_reg_map cnp_reg_map = {
        .pfear_sts = ext_cnp_pfear_map,
        .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
        .slps0_dbg_maps = cnp_slps0_dbg_maps,
        .ltr_show_sts = cnp_ltr_show_map,
        .msr_sts = msr_map,
        .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
        .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
        .regmap_length = CNP_PMC_MMIO_REG_LEN,
        .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
        .ppfear_buckets = CNP_PPFEAR_NUM_ENTRIES,
        .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
        .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
        .ltr_ignore_max = CNP_NUM_IP_IGN_ALLOWED,
};

static const struct pmc_reg_map icl_reg_map = {
        .pfear_sts = ext_icl_pfear_map,
        .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
        .slps0_dbg_maps = cnp_slps0_dbg_maps,
        .ltr_show_sts = cnp_ltr_show_map,
        .msr_sts = msr_map,
        .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
        .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
        .regmap_length = CNP_PMC_MMIO_REG_LEN,
        .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
        .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
        .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
        .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
        .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED,
};

static const struct pmc_reg_map tgl_reg_map = {
        .pfear_sts = ext_tgl_pfear_map,
        .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET,
        .slps0_dbg_maps = cnp_slps0_dbg_maps,
        .ltr_show_sts = cnp_ltr_show_map,
        .msr_sts = msr_map,
        .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET,
        .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET,
        .regmap_length = CNP_PMC_MMIO_REG_LEN,
        .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A,
        .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES,
        .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET,
        .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT,
        .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED,
};

static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
{
        return readl(pmcdev->regbase + reg_offset);
}

static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset,
                                      u32 val)
{
        writel(val, pmcdev->regbase + reg_offset);
}

static inline u64 pmc_core_adjust_slp_s0_step(u32 value)
{
        return (u64)value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
}

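/*
 * Note on units (the actual step size is defined in intel_pmc_core.h and is
 * not repeated here): the SLP_S0 residency counter increments in fixed
 * hardware steps, so this helper only scales the raw 32-bit count into time.
 * As an illustration, if the step were 100 usec per tick, a raw count of 50
 * would be reported as 50 * 100 = 5000 usec through the
 * slp_s0_residency_usec debugfs file below.
 */
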
static int pmc_core_dev_state_get(void *data, u64 *val)
{
        struct pmc_dev *pmcdev = data;
        const struct pmc_reg_map *map = pmcdev->map;
        u32 value;

        value = pmc_core_reg_read(pmcdev, map->slp_s0_offset);
        *val = pmc_core_adjust_slp_s0_step(value);

        return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");

static int pmc_core_check_read_lock_bit(void)
{
        struct pmc_dev *pmcdev = &pmc;
        u32 value;

        value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_cfg_offset);
        return value & BIT(pmcdev->map->pm_read_disable_bit);
}

#if IS_ENABLED(CONFIG_DEBUG_FS)
static bool slps0_dbg_latch;

static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset)
{
        return readb(pmcdev->regbase + offset);
}

static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
                                 u8 pf_reg, const struct pmc_bit_map **pf_map)
{
        seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n",
                   ip, pf_map[idx][index].name,
                   pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}

static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
        struct pmc_dev *pmcdev = s->private;
        const struct pmc_bit_map **maps = pmcdev->map->pfear_sts;
        u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
        int index, iter, idx, ip = 0;

        iter = pmcdev->map->ppfear0_offset;

        for (index = 0; index < pmcdev->map->ppfear_buckets &&
             index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
                pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);

        for (idx = 0; maps[idx]; idx++) {
                for (index = 0; maps[idx][index].name &&
                     index < pmcdev->map->ppfear_buckets * 8; ip++, index++)
                        pmc_core_display_map(s, index, idx, ip,
                                             pf_regs[index / 8], maps);
        }

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);

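/*
 * How to read the output above: the PPFEAR space is read one byte (bucket)
 * at a time, so entry 'index' across the concatenated maps tests its
 * bit_mask against byte pf_regs[index / 8]; a set bit is printed as "Off"
 * and a clear bit as "On".
 */
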
/* This function should return link status, 0 means ready */
static int pmc_core_mtpmc_link_status(void)
{
        struct pmc_dev *pmcdev = &pmc;
        u32 value;

        value = pmc_core_reg_read(pmcdev, SPT_PMC_PM_STS_OFFSET);
        return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
}

static int pmc_core_send_msg(u32 *addr_xram)
{
        struct pmc_dev *pmcdev = &pmc;
        u32 dest;
        int timeout;

        for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
                if (pmc_core_mtpmc_link_status() == 0)
                        break;
                msleep(5);
        }

        if (timeout <= 0 && pmc_core_mtpmc_link_status())
                return -EBUSY;

        dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
        pmc_core_reg_write(pmcdev, SPT_PMC_MTPMC_OFFSET, dest);
        return 0;
}

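/*
 * Mailbox usage pattern, as followed by the callers below (a summary of the
 * existing code, not a new interface): write the request word to MTPMC with
 * pmc_core_send_msg(), wait ~10 ms for the PMC to respond, then read the
 * result back from MFPMC:
 *
 *      if (pmc_core_send_msg(&request) != 0)
 *              return -EBUSY;
 *      msleep(10);
 *      val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);
 */
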
static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
        struct pmc_dev *pmcdev = s->private;
        const struct pmc_bit_map *map = pmcdev->map->mphy_sts;
        u32 mphy_core_reg_low, mphy_core_reg_high;
        u32 val_low, val_high;
        int index, err = 0;

        if (pmcdev->pmc_xram_read_bit) {
                seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
                return 0;
        }

        mphy_core_reg_low = (SPT_PMC_MPHY_CORE_STS_0 << 16);
        mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);

        mutex_lock(&pmcdev->lock);

        if (pmc_core_send_msg(&mphy_core_reg_low) != 0) {
                err = -EBUSY;
                goto out_unlock;
        }

        msleep(10);
        val_low = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

        if (pmc_core_send_msg(&mphy_core_reg_high) != 0) {
                err = -EBUSY;
                goto out_unlock;
        }

        msleep(10);
        val_high = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

        for (index = 0; map[index].name && index < 8; index++) {
                seq_printf(s, "%-32s\tState: %s\n",
                           map[index].name,
                           map[index].bit_mask & val_low ? "Not power gated" :
                           "Power gated");
        }

        for (index = 8; map[index].name; index++) {
                seq_printf(s, "%-32s\tState: %s\n",
                           map[index].name,
                           map[index].bit_mask & val_high ? "Not power gated" :
                           "Power gated");
        }

out_unlock:
        mutex_unlock(&pmcdev->lock);
        return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);

static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
        struct pmc_dev *pmcdev = s->private;
        const struct pmc_bit_map *map = pmcdev->map->pll_sts;
        u32 mphy_common_reg, val;
        int index, err = 0;

        if (pmcdev->pmc_xram_read_bit) {
                seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
                return 0;
        }

        mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
        mutex_lock(&pmcdev->lock);

        if (pmc_core_send_msg(&mphy_common_reg) != 0) {
                err = -EBUSY;
                goto out_unlock;
        }

        /* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
        msleep(10);
        val = pmc_core_reg_read(pmcdev, SPT_PMC_MFPMC_OFFSET);

        for (index = 0; map[index].name ; index++) {
                seq_printf(s, "%-32s\tState: %s\n",
                           map[index].name,
                           map[index].bit_mask & val ? "Active" : "Idle");
        }

out_unlock:
        mutex_unlock(&pmcdev->lock);
        return err;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);

static ssize_t pmc_core_ltr_ignore_write(struct file *file,
                                         const char __user *userbuf,
                                         size_t count, loff_t *ppos)
{
        struct pmc_dev *pmcdev = &pmc;
        const struct pmc_reg_map *map = pmcdev->map;
        u32 val, buf_size, fd;
        int err;

        buf_size = count < 64 ? count : 64;

        err = kstrtou32_from_user(userbuf, buf_size, 10, &val);
        if (err)
                return err;

        mutex_lock(&pmcdev->lock);

        if (val > map->ltr_ignore_max) {
                err = -EINVAL;
                goto out_unlock;
        }

        fd = pmc_core_reg_read(pmcdev, map->ltr_ignore_offset);
        fd |= (1U << val);
        pmc_core_reg_write(pmcdev, map->ltr_ignore_offset, fd);

out_unlock:
        mutex_unlock(&pmcdev->lock);
        return err == 0 ? count : err;
}

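/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *      echo 10 > /sys/kernel/debug/pmc_core/ltr_ignore
 *
 * parses the decimal index 10 and sets bit 10 of the LTR ignore register,
 * which is intended to make the PMC disregard LTR input from the
 * corresponding IP in the ltr_show map.  Indices above ltr_ignore_max are
 * rejected with -EINVAL.
 */
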
static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
        return 0;
}

static int pmc_core_ltr_ignore_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmc_core_ltr_ignore_show, inode->i_private);
}

static const struct file_operations pmc_core_ltr_ignore_ops = {
        .open = pmc_core_ltr_ignore_open,
        .read = seq_read,
        .write = pmc_core_ltr_ignore_write,
        .llseek = seq_lseek,
        .release = single_release,
};

static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
        const struct pmc_reg_map *map = pmcdev->map;
        u32 fd;

        mutex_lock(&pmcdev->lock);

        if (!reset && !slps0_dbg_latch)
                goto out_unlock;

        fd = pmc_core_reg_read(pmcdev, map->slps0_dbg_offset);
        if (reset)
                fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
        else
                fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
        pmc_core_reg_write(pmcdev, map->slps0_dbg_offset, fd);

        slps0_dbg_latch = 0;

out_unlock:
        mutex_unlock(&pmcdev->lock);
}

static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
        struct pmc_dev *pmcdev = s->private;
        const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
        const struct pmc_bit_map *map;
        int offset;
        u32 data;

        pmc_core_slps0_dbg_latch(pmcdev, false);
        offset = pmcdev->map->slps0_dbg_offset;
        while (*maps) {
                map = *maps;
                data = pmc_core_reg_read(pmcdev, offset);
                offset += 4;
                while (map->name) {
                        seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
                                   map->name,
                                   data & map->bit_mask ?
                                   "Yes" : "No");
                        ++map;
                }
                ++maps;
        }
        pmc_core_slps0_dbg_latch(pmcdev, true);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);

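/*
 * Typical debugfs usage (paths assume debugfs at /sys/kernel/debug):
 *
 *      echo Y > /sys/kernel/debug/pmc_core/slp_s0_dbg_latch
 *      cat /sys/kernel/debug/pmc_core/slp_s0_debug_status
 *
 * With the latch flag set, the show path above asserts
 * CNP_PMC_LATCH_SLPS0_EVENTS before dumping the three SLP_S0 debug
 * registers and clears it again afterwards, so the dump reflects freshly
 * latched events rather than stale state.
 */
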
static u32 convert_ltr_scale(u32 val)
{
        /*
         * As per PCIE specification supporting document
         * ECN_LatencyTolnReporting_14Aug08.pdf the Latency
         * Tolerance Reporting data payload is encoded in a
         * 3 bit scale and 10 bit value fields. Values are
         * multiplied by the indicated scale to yield an absolute time
         * value, expressible in a range from 1 nanosecond to
         * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
         *
         * scale encoding is as follows:
         *
         * ----------------------------------------------
         * |scale factor | Multiplier (ns)              |
         * ----------------------------------------------
         * |     0       |          1                   |
         * |     1       |         32                   |
         * |     2       |       1024                   |
         * |     3       |      32768                   |
         * |     4       |    1048576                   |
         * |     5       |   33554432                   |
         * |     6       |   Invalid                    |
         * |     7       |   Invalid                    |
         * ----------------------------------------------
         */
        if (val > 5) {
                pr_warn("Invalid LTR scale factor.\n");
                return 0;
        }

        return 1U << (5 * val);
}

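/*
 * Worked example: an LTR field with scale = 2 and value = 100 decodes to
 * 100 * convert_ltr_scale(2) = 100 * 1024 = 102400 ns (~102 us).  This is
 * how pmc_core_ltr_show() below turns each raw LTR register into
 * nanoseconds.
 */
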
static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
        struct pmc_dev *pmcdev = s->private;
        const struct pmc_bit_map *map = pmcdev->map->ltr_show_sts;
        u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
        u32 ltr_raw_data, scale, val;
        u16 snoop_ltr, nonsnoop_ltr;
        int index;

        for (index = 0; map[index].name ; index++) {
                decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
                ltr_raw_data = pmc_core_reg_read(pmcdev,
                                                 map[index].bit_mask);
                snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
                nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

                if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
                        scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
                        val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
                        decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
                }

                if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
                        scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
                        val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
                        decoded_snoop_ltr = val * convert_ltr_scale(scale);
                }

                seq_printf(s, "%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\n",
                           map[index].name, ltr_raw_data,
                           decoded_non_snoop_ltr,
                           decoded_snoop_ltr);
        }
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);

static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
        struct pmc_dev *pmcdev = s->private;
        const struct pmc_bit_map *map = pmcdev->map->msr_sts;
        u64 pcstate_count;
        int index;

        for (index = 0; map[index].name ; index++) {
                if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
                        continue;

                pcstate_count *= 1000;
                do_div(pcstate_count, tsc_khz);
                seq_printf(s, "%-8s : %llu\n", map[index].name,
                           pcstate_count);
        }

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);

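/*
 * Unit check for the conversion above: the code assumes the package C-state
 * residency MSRs tick at TSC frequency, so count * 1000 / tsc_khz yields
 * microseconds.  For example, with a 2 GHz TSC (tsc_khz = 2000000), a
 * residency count of 2,000,000,000 is printed as 1000000 (one second).
 */
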
static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
        debugfs_remove_recursive(pmcdev->dbgfs_dir);
}

static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
        struct dentry *dir;

        dir = debugfs_create_dir("pmc_core", NULL);
        pmcdev->dbgfs_dir = dir;

        debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev,
                            &pmc_core_dev_state);

        if (pmcdev->map->pfear_sts)
                debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
                                    pmcdev, &pmc_core_ppfear_fops);

        debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
                            &pmc_core_ltr_ignore_ops);

        debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);

        debugfs_create_file("package_cstate_show", 0444, dir, pmcdev,
                            &pmc_core_pkgc_fops);

        if (pmcdev->map->pll_sts)
                debugfs_create_file("pll_status", 0444, dir, pmcdev,
                                    &pmc_core_pll_fops);

        if (pmcdev->map->mphy_sts)
                debugfs_create_file("mphy_core_lanes_power_gating_status",
                                    0444, dir, pmcdev,
                                    &pmc_core_mphy_pg_fops);

        if (pmcdev->map->slps0_dbg_maps) {
                debugfs_create_file("slp_s0_debug_status", 0444,
                                    dir, pmcdev,
                                    &pmc_core_slps0_dbg_fops);

                debugfs_create_bool("slp_s0_dbg_latch", 0644,
                                    dir, &slps0_dbg_latch);
        }
}
#else
static inline void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
}

static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
}
#endif /* CONFIG_DEBUG_FS */

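/*
 * With debugfs mounted at /sys/kernel/debug, pmc_core_dbgfs_register()
 * above exposes, depending on which maps the platform's pmc_reg_map
 * provides:
 *
 *      /sys/kernel/debug/pmc_core/slp_s0_residency_usec
 *      /sys/kernel/debug/pmc_core/pch_ip_power_gating_status
 *      /sys/kernel/debug/pmc_core/ltr_ignore
 *      /sys/kernel/debug/pmc_core/ltr_show
 *      /sys/kernel/debug/pmc_core/package_cstate_show
 *      /sys/kernel/debug/pmc_core/pll_status
 *      /sys/kernel/debug/pmc_core/mphy_core_lanes_power_gating_status
 *      /sys/kernel/debug/pmc_core/slp_s0_debug_status
 *      /sys/kernel/debug/pmc_core/slp_s0_dbg_latch
 */
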
static const struct x86_cpu_id intel_pmc_core_ids[] = {
        INTEL_CPU_FAM6(SKYLAKE_L, spt_reg_map),
        INTEL_CPU_FAM6(SKYLAKE, spt_reg_map),
        INTEL_CPU_FAM6(KABYLAKE_L, spt_reg_map),
        INTEL_CPU_FAM6(KABYLAKE, spt_reg_map),
        INTEL_CPU_FAM6(CANNONLAKE_L, cnp_reg_map),
        INTEL_CPU_FAM6(ICELAKE_L, icl_reg_map),
        INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
        INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map),
        INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map),
        INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map),
        INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map),
        INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map),
        {}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);

static const struct pci_device_id pmc_pci_ids[] = {
        { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
        { 0, },
};

/*
 * This quirk can be used on those platforms where the platform BIOS
 * forces the 24 MHz crystal to shut down before the PMC can assert
 * SLP_S0#.
 */
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
        struct pmc_dev *pmcdev = &pmc;
        u32 value;

        value = pmc_core_reg_read(pmcdev, pmcdev->map->pm_vric1_offset);
        /* 24MHz Crystal Shutdown Qualification Disable */
        value |= SPT_PMC_VRIC1_XTALSDQDIS;
        /* Low Voltage Mode Enable */
        value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
        pmc_core_reg_write(pmcdev, pmcdev->map->pm_vric1_offset, value);
        return 0;
}

static const struct dmi_system_id pmc_core_dmi_table[] = {
        {
                .callback = quirk_xtal_ignore,
                .ident = "HP Elite x2 1013 G3",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
                },
        },
        {}
};

static int pmc_core_probe(struct platform_device *pdev)
{
        static bool device_initialized;
        struct pmc_dev *pmcdev = &pmc;
        const struct x86_cpu_id *cpu_id;
        u64 slp_s0_addr;

        if (device_initialized)
                return -ENODEV;

        cpu_id = x86_match_cpu(intel_pmc_core_ids);
        if (!cpu_id)
                return -ENODEV;

        pmcdev->map = (struct pmc_reg_map *)cpu_id->driver_data;

        /*
         * Coffee Lake has the same CPU ID as Kaby Lake but is paired with a
         * Cannon Lake PCH, so the Sunrisepoint PCH regmap can't be used
         * there.  Use the Cannon Lake PCH regmap in that case.
         */
        if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
                pmcdev->map = &cnp_reg_map;

        if (lpit_read_residency_count_address(&slp_s0_addr)) {
                pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;

                if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
                        return -ENODEV;
        } else {
                pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
        }

        pmcdev->regbase = ioremap(pmcdev->base_addr,
                                  pmcdev->map->regmap_length);
        if (!pmcdev->regbase)
                return -ENOMEM;

        mutex_init(&pmcdev->lock);
        platform_set_drvdata(pdev, pmcdev);
        pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit();
        dmi_check_system(pmc_core_dmi_table);

        pmc_core_dbgfs_register(pmcdev);

        device_initialized = true;
        dev_info(&pdev->dev, " initialized\n");

        return 0;
}

static int pmc_core_remove(struct platform_device *pdev)
{
        struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

        pmc_core_dbgfs_unregister(pmcdev);
        platform_set_drvdata(pdev, NULL);
        mutex_destroy(&pmcdev->lock);
        iounmap(pmcdev->regbase);
        return 0;
}

#ifdef CONFIG_PM_SLEEP

static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");

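/*
 * The 0644 permission makes the flag adjustable at runtime, e.g. (assuming
 * the usual intel_pmc_core module name):
 *
 *      echo 1 > /sys/module/intel_pmc_core/parameters/warn_on_s0ix_failures
 *
 * or "intel_pmc_core.warn_on_s0ix_failures=1" on the kernel command line
 * when the driver is built in.
 */
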
static int pmc_core_suspend(struct device *dev)
{
        struct pmc_dev *pmcdev = dev_get_drvdata(dev);

        pmcdev->check_counters = false;

        /* No warnings on S0ix failures */
        if (!warn_on_s0ix_failures)
                return 0;

        /* Check if the suspend will actually use S0ix */
        if (pm_suspend_via_firmware())
                return 0;

        /* Save PC10 residency for checking later */
        if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pmcdev->pc10_counter))
                return -EIO;

        /* Save S0ix residency for checking later */
        if (pmc_core_dev_state_get(pmcdev, &pmcdev->s0ix_counter))
                return -EIO;

        pmcdev->check_counters = true;
        return 0;
}

static inline bool pmc_core_is_pc10_failed(struct pmc_dev *pmcdev)
{
        u64 pc10_counter;

        if (rdmsrl_safe(MSR_PKG_C10_RESIDENCY, &pc10_counter))
                return false;

        if (pc10_counter == pmcdev->pc10_counter)
                return true;

        return false;
}

static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
        u64 s0ix_counter;

        if (pmc_core_dev_state_get(pmcdev, &s0ix_counter))
                return false;

        if (s0ix_counter == pmcdev->s0ix_counter)
                return true;

        return false;
}

static int pmc_core_resume(struct device *dev)
{
        struct pmc_dev *pmcdev = dev_get_drvdata(dev);
        const struct pmc_bit_map **maps = pmcdev->map->slps0_dbg_maps;
        int offset = pmcdev->map->slps0_dbg_offset;
        const struct pmc_bit_map *map;
        u32 data;

        if (!pmcdev->check_counters)
                return 0;

        if (!pmc_core_is_s0ix_failed(pmcdev))
                return 0;

        if (pmc_core_is_pc10_failed(pmcdev)) {
                /* S0ix failed because of PC10 entry failure */
                dev_info(dev, "CPU did not enter PC10!!! (PC10 cnt=0x%llx)\n",
                         pmcdev->pc10_counter);
                return 0;
        }

        /* The really interesting case: S0ix failed - let's ask the PMC why. */
        dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
                 pmcdev->s0ix_counter);
        while (*maps) {
                map = *maps;
                data = pmc_core_reg_read(pmcdev, offset);
                offset += 4;
                while (map->name) {
                        dev_dbg(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
                                map->name,
                                data & map->bit_mask ? "Yes" : "No");
                        map++;
                }
                maps++;
        }
        return 0;
}

#endif

static const struct dev_pm_ops pmc_core_pm_ops = {
        SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
        {"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
        {}
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);

static struct platform_driver pmc_core_driver = {
        .driver = {
                .name = "intel_pmc_core",
                .acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
                .pm = &pmc_core_pm_ops,
        },
        .probe = pmc_core_probe,
        .remove = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");