// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Core SoC Power Management Controller Driver
 *
 * Copyright (c) 2016, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
 *          Vishwanath Somayaji <vishwanath.somayaji@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/units.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/msr.h>
#include <asm/tsc.h>

#include "core.h"
#include "../pmt/telemetry.h"

/* Maximum number of modes supported by platforms that have low power mode capability */
const char *pmc_lpm_modes[] = {
	"S0i2.0",
	"S0i2.1",
	"S0i2.2",
	"S0i3.0",
	"S0i3.1",
	"S0i3.2",
	"S0i3.3",
	"S0i3.4",
	NULL
};

/* PKGC MSRs are common across Intel Core SoCs */
const struct pmc_bit_map msr_map[] = {
	{"Package C2",		MSR_PKG_C2_RESIDENCY},
	{"Package C3",		MSR_PKG_C3_RESIDENCY},
	{"Package C6",		MSR_PKG_C6_RESIDENCY},
	{"Package C7",		MSR_PKG_C7_RESIDENCY},
	{"Package C8",		MSR_PKG_C8_RESIDENCY},
	{"Package C9",		MSR_PKG_C9_RESIDENCY},
	{"Package C10",		MSR_PKG_C10_RESIDENCY},
	{}
};
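
/*
 * Note (added for clarity): in msr_map the bit_mask field carries an MSR
 * index rather than a register bit mask; pmc_core_pkgc_show() and the
 * suspend/resume residency checks below hand it directly to rdmsrl_safe().
 */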

static inline u32 pmc_core_reg_read(struct pmc *pmc, int reg_offset)
{
	return readl(pmc->regbase + reg_offset);
}

static inline void pmc_core_reg_write(struct pmc *pmc, int reg_offset,
				      u32 val)
{
	writel(val, pmc->regbase + reg_offset);
}

static inline u64 pmc_core_adjust_slp_s0_step(struct pmc *pmc, u32 value)
{
	/*
	 * ADL PCH does not have the SLP_S0 counter and LPM Residency counters
	 * are used as a workaround, which uses a 30.5 usec tick. All other
	 * client platforms have the legacy SLP_S0 residency counter that uses
	 * a 122 usec tick.
	 */
	const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;

	if (pmc->map == &adl_reg_map)
		return (u64)value * GET_X2_COUNTER((u64)lpm_adj_x2);
	else
		return (u64)value * pmc->map->slp_s0_res_counter_step;
}
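
/*
 * Worked example (illustrative): the *_x2 fields store twice the real step
 * so that a fractional 30.5 usec tick can be kept as the integer 61;
 * GET_X2_COUNTER() is assumed to undo that doubling, so a raw ADL reading
 * of N scales to roughly N * 30.5 usec on the path above.
 */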

static int set_etr3(struct pmc_dev *pmcdev)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	guard(mutex)(&pmcdev->lock);

	/* check if CF9 is locked */
	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (reg & ETR3_CF9LOCK)
		return -EACCES;

	/* write CF9 global reset bit */
	reg |= ETR3_CF9GR;
	pmc_core_reg_write(pmc, map->etr3_offset, reg);

	reg = pmc_core_reg_read(pmc, map->etr3_offset);
	if (!(reg & ETR3_CF9GR))
		return -EIO;

	return 0;
}

static umode_t etr3_is_visible(struct kobject *kobj,
			       struct attribute *attr,
			       int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	scoped_guard(mutex, &pmcdev->lock)
		reg = pmc_core_reg_read(pmc, map->etr3_offset);

	return reg & ETR3_CF9LOCK ? attr->mode & (SYSFS_PREALLOC | 0444) : attr->mode;
}

static ssize_t etr3_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 reg;

	if (!map->etr3_offset)
		return -EOPNOTSUPP;

	scoped_guard(mutex, &pmcdev->lock) {
		reg = pmc_core_reg_read(pmc, map->etr3_offset);
		reg &= ETR3_CF9GR | ETR3_CF9LOCK;
	}

	return sysfs_emit(buf, "0x%08x", reg);
}

static ssize_t etr3_store(struct device *dev,
			  struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	int err;
	u32 reg;

	err = kstrtouint(buf, 16, &reg);
	if (err)
		return err;

	/* allow only CF9 writes */
	if (reg != ETR3_CF9GR)
		return -EINVAL;

	err = set_etr3(pmcdev);
	if (err)
		return err;

	return len;
}
static DEVICE_ATTR_RW(etr3);

static struct attribute *pmc_attrs[] = {
	&dev_attr_etr3.attr,
	NULL
};

static const struct attribute_group pmc_attr_group = {
	.attrs = pmc_attrs,
	.is_visible = etr3_is_visible,
};

static const struct attribute_group *pmc_dev_groups[] = {
	&pmc_attr_group,
	NULL
};

static int pmc_core_dev_state_get(void *data, u64 *val)
{
	struct pmc *pmc = data;
	const struct pmc_reg_map *map = pmc->map;
	u32 value;

	value = pmc_core_reg_read(pmc, map->slp_s0_offset);
	*val = pmc_core_adjust_slp_s0_step(pmc, value);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_dev_state, pmc_core_dev_state_get, NULL, "%llu\n");

static int pmc_core_pson_residency_get(void *data, u64 *val)
{
	struct pmc *pmc = data;
	const struct pmc_reg_map *map = pmc->map;
	u32 value;

	value = pmc_core_reg_read(pmc, map->pson_residency_offset);
	*val = (u64)value * map->pson_residency_counter_step;

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(pmc_core_pson_residency, pmc_core_pson_residency_get, NULL, "%llu\n");

static int pmc_core_check_read_lock_bit(struct pmc *pmc)
{
	u32 value;

	value = pmc_core_reg_read(pmc, pmc->map->pm_cfg_offset);
	return value & BIT(pmc->map->pm_read_disable_bit);
}

static void pmc_core_slps0_display(struct pmc *pmc, struct device *dev,
				   struct seq_file *s)
{
	const struct pmc_bit_map **maps = pmc->map->slps0_dbg_maps;
	const struct pmc_bit_map *map;
	int offset = pmc->map->slps0_dbg_offset;
	u32 data;

	while (*maps) {
		map = *maps;
		data = pmc_core_reg_read(pmc, offset);
		offset += 4;
		while (map->name) {
			if (dev)
				dev_info(dev, "SLP_S0_DBG: %-32s\tState: %s\n",
					 map->name,
					 data & map->bit_mask ? "Yes" : "No");
			if (s)
				seq_printf(s, "SLP_S0_DBG: %-32s\tState: %s\n",
					   map->name,
					   data & map->bit_mask ? "Yes" : "No");
			++map;
		}
		++maps;
	}
}

static unsigned int pmc_core_lpm_get_arr_size(const struct pmc_bit_map **maps)
{
	unsigned int idx;

	for (idx = 0; maps[idx]; idx++)
		;/* Nothing */

	return idx;
}

static void pmc_core_lpm_display(struct pmc *pmc, struct device *dev,
				 struct seq_file *s, u32 offset, int pmc_index,
				 const char *str,
				 const struct pmc_bit_map **maps)
{
	unsigned int index, idx, len = 32, arr_size;
	u32 bit_mask, *lpm_regs;

	arr_size = pmc_core_lpm_get_arr_size(maps);
	lpm_regs = kmalloc_array(arr_size, sizeof(*lpm_regs), GFP_KERNEL);
	if (!lpm_regs)
		return;

	for (index = 0; index < arr_size; index++) {
		lpm_regs[index] = pmc_core_reg_read(pmc, offset);
		offset += 4;
	}

	for (idx = 0; idx < arr_size; idx++) {
		if (dev)
			dev_info(dev, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
				 lpm_regs[idx]);
		if (s)
			seq_printf(s, "\nPMC%d:LPM_%s_%d:\t0x%x\n", pmc_index, str, idx,
				   lpm_regs[idx]);
		for (index = 0; maps[idx][index].name && index < len; index++) {
			bit_mask = maps[idx][index].bit_mask;
			if (dev)
				dev_info(dev, "PMC%d:%-30s %-30d\n", pmc_index,
					 maps[idx][index].name,
					 lpm_regs[idx] & bit_mask ? 1 : 0);
			if (s)
				seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_index,
					   maps[idx][index].name,
					   lpm_regs[idx] & bit_mask ? 1 : 0);
		}
	}

	kfree(lpm_regs);
}
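
/*
 * Layout note (added for clarity): the LPM status registers form one
 * consecutive 32-bit register per status map, which is why the read loop
 * above advances the offset by 4 bytes for each entry in maps[].
 */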

static bool slps0_dbg_latch;

static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset)
{
	return readb(pmc->regbase + offset);
}

static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
				 int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map)
{
	seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n",
		   pmc_index, ip, pf_map[idx][index].name,
		   pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}

static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map **maps;
		u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
		unsigned int index, iter, idx, ip = 0;

		if (!pmc)
			continue;

		maps = pmc->map->pfear_sts;
		iter = pmc->map->ppfear0_offset;

		for (index = 0; index < pmc->map->ppfear_buckets &&
		     index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
			pf_regs[index] = pmc_core_reg_read_byte(pmc, iter);

		for (idx = 0; maps[idx]; idx++) {
			for (index = 0; maps[idx][index].name &&
			     index < pmc->map->ppfear_buckets * 8; ip++, index++)
				pmc_core_display_map(s, index, idx, ip, i,
						     pf_regs[index / 8], maps);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ppfear);
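
/*
 * Layout note (added for clarity): PPFEAR state is packed eight IPs per
 * byte-wide bucket, so pf_regs[index / 8] selects the bucket holding the
 * bit for map entry 'index', and a platform with N buckets can report at
 * most N * 8 IPs, the bound used in the loop above.
 */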

/* This function should return link status, 0 means ready */
static int pmc_core_mtpmc_link_status(struct pmc *pmc)
{
	u32 value;

	value = pmc_core_reg_read(pmc, SPT_PMC_PM_STS_OFFSET);
	return value & BIT(SPT_PMC_MSG_FULL_STS_BIT);
}

static int pmc_core_send_msg(struct pmc *pmc, u32 *addr_xram)
{
	u32 dest;
	int timeout;

	for (timeout = NUM_RETRIES; timeout > 0; timeout--) {
		if (pmc_core_mtpmc_link_status(pmc) == 0)
			break;
		msleep(5);
	}

	if (timeout <= 0 && pmc_core_mtpmc_link_status(pmc))
		return -EBUSY;

	dest = (*addr_xram & MTPMC_MASK) | (1U << 1);
	pmc_core_reg_write(pmc, SPT_PMC_MTPMC_OFFSET, dest);
	return 0;
}

static int pmc_core_mphy_pg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map *map = pmc->map->mphy_sts;
	u32 mphy_core_reg_low, mphy_core_reg_high;
	u32 val_low, val_high;
	unsigned int index;
	int err = 0;

	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_core_reg_low  = (SPT_PMC_MPHY_CORE_STS_0 << 16);
	mphy_core_reg_high = (SPT_PMC_MPHY_CORE_STS_1 << 16);

	guard(mutex)(&pmcdev->lock);

	err = pmc_core_send_msg(pmc, &mphy_core_reg_low);
	if (err)
		return err;

	msleep(10);
	val_low = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	err = pmc_core_send_msg(pmc, &mphy_core_reg_high);
	if (err)
		return err;

	msleep(10);
	val_high = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; index < 8 && map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_low ? "Not power gated" :
			   "Power gated");
	}

	for (index = 8; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val_high ? "Not power gated" :
			   "Power gated");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_mphy_pg);

static int pmc_core_pll_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map *map = pmc->map->pll_sts;
	u32 mphy_common_reg, val;
	unsigned int index;
	int err = 0;

	if (pmcdev->pmc_xram_read_bit) {
		seq_puts(s, "Access denied: please disable PMC_READ_DISABLE setting in BIOS.");
		return 0;
	}

	mphy_common_reg = (SPT_PMC_MPHY_COM_STS_0 << 16);
	guard(mutex)(&pmcdev->lock);

	err = pmc_core_send_msg(pmc, &mphy_common_reg);
	if (err)
		return err;

	/* Observed PMC HW response latency for MTPMC-MFPMC is ~10 ms */
	msleep(10);
	val = pmc_core_reg_read(pmc, SPT_PMC_MFPMC_OFFSET);

	for (index = 0; map[index].name; index++) {
		seq_printf(s, "%-32s\tState: %s\n",
			   map[index].name,
			   map[index].bit_mask & val ? "Active" : "Idle");
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pll);

int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
{
	struct pmc *pmc;
	const struct pmc_reg_map *map;
	u32 reg;
	unsigned int pmc_index;
	int ltr_index;

	ltr_index = value;
	/*
	 * For platforms with multiple PMCs, the LTR index value given by the
	 * user is based on the contiguous indexes from the ltr_show output.
	 * The PMC index and LTR index need to be calculated from it.
	 */
	for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
		pmc = pmcdev->pmcs[pmc_index];

		if (!pmc)
			continue;

		map = pmc->map;
		if (ltr_index <= map->ltr_ignore_max)
			break;

		/*
		 * Along with IP names, the ltr_show map includes CURRENT_PLATFORM
		 * and AGGREGATED_SYSTEM values per PMC. Take these two index
		 * values into account in the ltr_index calculation. Also, to start
		 * the LTR index from zero for the next PMC, subtract one more.
		 */
		ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
	}

	if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
		return -EINVAL;

	pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);

	guard(mutex)(&pmcdev->lock);

	reg = pmc_core_reg_read(pmc, map->ltr_ignore_offset);
	if (ignore)
		reg |= BIT(ltr_index);
	else
		reg &= ~BIT(ltr_index);
	pmc_core_reg_write(pmc, map->ltr_ignore_offset, reg);

	return 0;
}
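
/*
 * Worked example (illustrative, assuming PMC0 has ltr_ignore_max = 22):
 * ltr_show lists indexes 0-22 as maskable IPs plus the two read-only rows
 * (CURRENT_PLATFORM and AGGREGATED_SYSTEM) for PMC0, so a user value of 25
 * falls past that block and the subtraction above maps it to index 0 of
 * the next PMC.
 */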

static ssize_t pmc_core_ltr_write(struct pmc_dev *pmcdev,
				  const char __user *userbuf,
				  size_t count, int ignore)
{
	u32 value;
	int err;

	err = kstrtou32_from_user(userbuf, count, 10, &value);
	if (err)
		return err;

	err = pmc_core_send_ltr_ignore(pmcdev, value, ignore);

	return err ?: count;
}

static ssize_t pmc_core_ltr_ignore_write(struct file *file,
					 const char __user *userbuf,
					 size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;

	return pmc_core_ltr_write(pmcdev, userbuf, count, 1);
}

static int pmc_core_ltr_ignore_show(struct seq_file *s, void *unused)
{
	return 0;
}

DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_ignore);

static ssize_t pmc_core_ltr_restore_write(struct file *file,
					  const char __user *userbuf,
					  size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;

	return pmc_core_ltr_write(pmcdev, userbuf, count, 0);
}

static int pmc_core_ltr_restore_show(struct seq_file *s, void *unused)
{
	return 0;
}

DEFINE_SHOW_STORE_ATTRIBUTE(pmc_core_ltr_restore);

static void pmc_core_slps0_dbg_latch(struct pmc_dev *pmcdev, bool reset)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_reg_map *map = pmc->map;
	u32 fd;

	guard(mutex)(&pmcdev->lock);

	if (!reset && !slps0_dbg_latch)
		return;

	fd = pmc_core_reg_read(pmc, map->slps0_dbg_offset);
	if (reset)
		fd &= ~CNP_PMC_LATCH_SLPS0_EVENTS;
	else
		fd |= CNP_PMC_LATCH_SLPS0_EVENTS;
	pmc_core_reg_write(pmc, map->slps0_dbg_offset, fd);

	slps0_dbg_latch = false;
}

static int pmc_core_slps0_dbg_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;

	pmc_core_slps0_dbg_latch(pmcdev, false);
	pmc_core_slps0_display(pmcdev->pmcs[PMC_IDX_MAIN], NULL, s);
	pmc_core_slps0_dbg_latch(pmcdev, true);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_slps0_dbg);

static u32 convert_ltr_scale(u32 val)
{
	/*
	 * As per the PCIe specification supporting document
	 * ECN_LatencyTolnReporting_14Aug08.pdf, the Latency
	 * Tolerance Reporting data payload is encoded in a
	 * 3 bit scale and 10 bit value fields. Values are
	 * multiplied by the indicated scale to yield an absolute time
	 * value, expressible in a range from 1 nanosecond to
	 * 2^25*(2^10-1) = 34,326,183,936 nanoseconds.
	 *
	 * scale encoding is as follows:
	 *
	 * ----------------------------------------------
	 * |scale factor	|	Multiplier (ns)	|
	 * ----------------------------------------------
	 * |	0		|	1		|
	 * |	1		|	32		|
	 * |	2		|	1024		|
	 * |	3		|	32768		|
	 * |	4		|	1048576		|
	 * |	5		|	33554432	|
	 * |	6		|	Invalid		|
	 * |	7		|	Invalid		|
	 * ----------------------------------------------
	 */
	if (val > 5) {
		pr_warn("Invalid LTR scale factor.\n");
		return 0;
	}

	return 1U << (5 * val);
}
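
/*
 * Worked example (added for illustration): an LTR payload with scale = 2
 * and value = 100 decodes to 100 * (1U << (5 * 2)) = 100 * 1024 = 102400 ns.
 */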

static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
	u32 ltr_raw_data, scale, val;
	u16 snoop_ltr, nonsnoop_ltr;
	unsigned int i, index, ltr_index = 0;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc;
		const struct pmc_bit_map *map;
		u32 ltr_ign_reg;

		pmc = pmcdev->pmcs[i];
		if (!pmc)
			continue;

		scoped_guard(mutex, &pmcdev->lock)
			ltr_ign_reg = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);

		map = pmc->map->ltr_show_sts;
		for (index = 0; map[index].name; index++) {
			bool ltr_ign_data;

			if (index > pmc->map->ltr_ignore_max)
				ltr_ign_data = false;
			else
				ltr_ign_data = ltr_ign_reg & BIT(index);

			decoded_snoop_ltr = decoded_non_snoop_ltr = 0;
			ltr_raw_data = pmc_core_reg_read(pmc,
							 map[index].bit_mask);
			snoop_ltr = ltr_raw_data & ~MTPMC_MASK;
			nonsnoop_ltr = (ltr_raw_data >> 0x10) & ~MTPMC_MASK;

			if (FIELD_GET(LTR_REQ_NONSNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, nonsnoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, nonsnoop_ltr);
				decoded_non_snoop_ltr = val * convert_ltr_scale(scale);
			}
			if (FIELD_GET(LTR_REQ_SNOOP, ltr_raw_data)) {
				scale = FIELD_GET(LTR_DECODED_SCALE, snoop_ltr);
				val = FIELD_GET(LTR_DECODED_VAL, snoop_ltr);
				decoded_snoop_ltr = val * convert_ltr_scale(scale);
			}

			seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
				   ltr_index, i, map[index].name, ltr_raw_data,
				   decoded_non_snoop_ltr,
				   decoded_snoop_ltr, ltr_ign_data);
			ltr_index++;
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);

static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	unsigned int pmcidx;

	for (pmcidx = 0; pmcidx < ARRAY_SIZE(pmcdev->pmcs); pmcidx++) {
		const struct pmc_bit_map **maps;
		unsigned int arr_size, r_idx;
		u32 offset, counter;
		struct pmc *pmc;

		pmc = pmcdev->pmcs[pmcidx];
		if (!pmc)
			continue;
		maps = pmc->map->s0ix_blocker_maps;
		offset = pmc->map->s0ix_blocker_offset;
		arr_size = pmc_core_lpm_get_arr_size(maps);

		for (r_idx = 0; r_idx < arr_size; r_idx++) {
			const struct pmc_bit_map *map;

			for (map = maps[r_idx]; map->name; map++) {
				if (!map->blk)
					continue;
				counter = pmc_core_reg_read(pmc, offset);
				seq_printf(s, "PMC%d:%-30s %-30d\n", pmcidx,
					   map->name, counter);
				offset += map->blk * S0IX_BLK_SIZE;
			}
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);

static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
		struct pmc *pmc;
		u32 ltr_ign;

		pmc = pmcdev->pmcs[i];
		if (!pmc)
			continue;

		guard(mutex)(&pmcdev->lock);
		pmc->ltr_ign = pmc_core_reg_read(pmc, pmc->map->ltr_ignore_offset);

		/* ltr_ignore_max is the max index value for the LTR ignore register */
		ltr_ign = pmc->ltr_ign | GENMASK(pmc->map->ltr_ignore_max, 0);
		pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, ltr_ign);
	}

	/*
	 * Ignoring the ME during suspend prevents platforms with an ADL PCH
	 * from reaching the deeper S0ix substates, so un-ignore its LTR here.
	 */
	pmc_core_send_ltr_ignore(pmcdev, 6, 0);
}

static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
		struct pmc *pmc;

		pmc = pmcdev->pmcs[i];
		if (!pmc)
			continue;

		guard(mutex)(&pmcdev->lock);
		pmc_core_reg_write(pmc, pmc->map->ltr_ignore_offset, pmc->ltr_ign);
	}
}

static inline u64 adjust_lpm_residency(struct pmc *pmc, u32 offset,
				       const int lpm_adj_x2)
{
	u64 lpm_res = pmc_core_reg_read(pmc, offset);

	return GET_X2_COUNTER((u64)lpm_adj_x2 * lpm_res);
}

static int pmc_core_substate_res_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const int lpm_adj_x2 = pmc->map->lpm_res_counter_step_x2;
	u32 offset = pmc->map->lpm_residency_offset;
	int mode;

	seq_printf(s, "%-10s %-15s\n", "Substate", "Residency");

	pmc_for_each_mode(mode, pmcdev) {
		seq_printf(s, "%-10s %-15llu\n", pmc_lpm_modes[mode],
			   adjust_lpm_residency(pmc, offset + (4 * mode), lpm_adj_x2));
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);

static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map **maps;
		u32 offset;

		if (!pmc)
			continue;

		maps = pmc->map->lpm_sts;
		offset = pmc->map->lpm_status_offset;
		pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);

static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];
		const struct pmc_bit_map **maps;
		u32 offset;

		if (!pmc)
			continue;

		maps = pmc->map->lpm_sts;
		offset = pmc->map->lpm_live_status_offset;
		pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_l_sts_regs);

static void pmc_core_substate_req_header_show(struct seq_file *s, int pmc_index)
{
	struct pmc_dev *pmcdev = s->private;
	int mode;

	seq_printf(s, "%30s |", "Element");
	pmc_for_each_mode(mode, pmcdev)
		seq_printf(s, " %9s |", pmc_lpm_modes[mode]);

	seq_printf(s, " %9s |", "Status");
	seq_printf(s, " %11s |\n", "Live Status");
}

static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	u32 sts_offset;
	u32 sts_offset_live;
	u32 *lpm_req_regs;
	unsigned int mp, pmc_index;
	int num_maps;

	for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
		struct pmc *pmc = pmcdev->pmcs[pmc_index];
		const struct pmc_bit_map **maps;

		if (!pmc)
			continue;

		maps = pmc->map->lpm_sts;
		num_maps = pmc->map->lpm_num_maps;
		sts_offset = pmc->map->lpm_status_offset;
		sts_offset_live = pmc->map->lpm_live_status_offset;
		lpm_req_regs = pmc->lpm_req_regs;

		/*
		 * When there are multiple PMCs, though the PMC may exist, the
		 * requirement register discovery could have failed so check
		 * before accessing.
		 */
		if (!lpm_req_regs)
			continue;

		/* Display the header */
		pmc_core_substate_req_header_show(s, pmc_index);

		/* Loop over maps */
		for (mp = 0; mp < num_maps; mp++) {
			u32 req_mask = 0;
			u32 lpm_status;
			u32 lpm_status_live;
			const struct pmc_bit_map *map;
			int mode, i, len = 32;

			/*
			 * Capture the requirements and create a mask so that we only
			 * show an element if it's required for at least one of the
			 * enabled low power modes
			 */
			pmc_for_each_mode(mode, pmcdev)
				req_mask |= lpm_req_regs[mp + (mode * num_maps)];

			/* Get the last latched status for this map */
			lpm_status = pmc_core_reg_read(pmc, sts_offset + (mp * 4));

			/* Get the runtime status for this map */
			lpm_status_live = pmc_core_reg_read(pmc, sts_offset_live + (mp * 4));

			/* Loop over elements in this map */
			map = maps[mp];
			for (i = 0; map[i].name && i < len; i++) {
				u32 bit_mask = map[i].bit_mask;

				if (!(bit_mask & req_mask)) {
					/*
					 * Not required for any enabled states
					 * so don't display
					 */
					continue;
				}

				/* Display the element name in the first column */
				seq_printf(s, "pmc%d: %26s |", pmc_index, map[i].name);

				/* Loop over the enabled states and display if required */
				pmc_for_each_mode(mode, pmcdev) {
					bool required = lpm_req_regs[mp + (mode * num_maps)] &
							bit_mask;
					seq_printf(s, " %9s |", required ? "Required" : " ");
				}

				/* In Status column, show the last captured state of this agent */
				seq_printf(s, " %9s |", lpm_status & bit_mask ? "Yes" : " ");

				/* In Live status column, show the live state of this agent */
				seq_printf(s, " %11s |", lpm_status_live & bit_mask ? "Yes" : " ");

				seq_puts(s, "\n");
			}
		}
	}
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_req_regs);
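
/*
 * Example output shape (illustrative only, column widths approximate):
 *
 *                        Element |    S0i2.0 |    S0i3.0 |    Status | Live Status |
 * pmc0:                  USB3PLL |  Required |           |       Yes |             |
 */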

static unsigned int pmc_core_get_crystal_freq(void)
{
	unsigned int eax_denominator, ebx_numerator, ecx_hz, edx;

	if (boot_cpu_data.cpuid_level < 0x15)
		return 0;

	eax_denominator = ebx_numerator = ecx_hz = edx = 0;

	/* CPUID 15H TSC/Crystal ratio, plus optionally Crystal Hz */
	cpuid(0x15, &eax_denominator, &ebx_numerator, &ecx_hz, &edx);

	if (ebx_numerator == 0 || eax_denominator == 0)
		return 0;

	return ecx_hz;
}
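
/*
 * Note (added for clarity): ECX of CPUID leaf 0x15 carries the crystal
 * clock frequency in Hz only on CPUs that enumerate it; when it reads as
 * zero this helper effectively reports no crystal frequency and
 * pmc_core_die_c6_us_show() below bails out with a warning.
 */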

static int pmc_core_die_c6_us_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	u64 die_c6_res, count;
	int ret;

	if (!pmcdev->crystal_freq) {
		dev_warn_once(&pmcdev->pdev->dev, "Crystal frequency unavailable\n");
		return -ENXIO;
	}

	ret = pmt_telem_read(pmcdev->punit_ep, pmcdev->die_c6_offset,
			     &count, 1);
	if (ret)
		return ret;

	die_c6_res = div64_u64(count * HZ_PER_MHZ, pmcdev->crystal_freq);
	seq_printf(s, "%llu\n", die_c6_res);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_die_c6_us);
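
/*
 * Unit note (added for clarity): pmt_telem_read() is assumed to return the
 * die C6 residency as a count of crystal clock ticks, so
 * ticks * HZ_PER_MHZ / crystal_freq (Hz) yields microseconds, matching the
 * _us suffix of the debugfs file.
 */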

static int pmc_core_lpm_latch_mode_show(struct seq_file *s, void *unused)
{
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool c10;
	u32 reg;
	int mode;

	reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
	if (reg & LPM_STS_LATCH_MODE) {
		seq_puts(s, "c10");
		c10 = false;
	} else {
		seq_puts(s, "[c10]");
		c10 = true;
	}

	pmc_for_each_mode(mode, pmcdev) {
		if ((BIT(mode) & reg) && !c10)
			seq_printf(s, " [%s]", pmc_lpm_modes[mode]);
		else
			seq_printf(s, " %s", pmc_lpm_modes[mode]);
	}

	seq_puts(s, " clear\n");

	return 0;
}

static ssize_t pmc_core_lpm_latch_mode_write(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct pmc_dev *pmcdev = s->private;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	bool clear = false, c10 = false;
	unsigned char buf[8];
	int m, mode;
	u32 reg;

	if (count > sizeof(buf) - 1)
		return -EINVAL;
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;
	buf[count] = '\0';

	/*
	 * Allowed strings are:
	 *	Any enabled substate, e.g. 'S0i2.0'
	 *	'c10'
	 *	'clear'
	 */
	mode = sysfs_match_string(pmc_lpm_modes, buf);

	/* Check string matches enabled mode */
	pmc_for_each_mode(m, pmcdev)
		if (mode == m)
			break;

	if (mode != m || mode < 0) {
		if (sysfs_streq(buf, "clear"))
			clear = true;
		else if (sysfs_streq(buf, "c10"))
			c10 = true;
		else
			return -EINVAL;
	}

	if (clear) {
		guard(mutex)(&pmcdev->lock);

		reg = pmc_core_reg_read(pmc, pmc->map->etr3_offset);
		reg |= ETR3_CLEAR_LPM_EVENTS;
		pmc_core_reg_write(pmc, pmc->map->etr3_offset, reg);

		return count;
	}

	if (c10) {
		guard(mutex)(&pmcdev->lock);

		reg = pmc_core_reg_read(pmc, pmc->map->lpm_sts_latch_en_offset);
		reg &= ~LPM_STS_LATCH_MODE;
		pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);

		return count;
	}

	/*
	 * For LPM mode latching we set the latch enable bit and selected mode
	 * and clear everything else.
	 */
	reg = LPM_STS_LATCH_MODE | BIT(mode);
	guard(mutex)(&pmcdev->lock);
	pmc_core_reg_write(pmc, pmc->map->lpm_sts_latch_en_offset, reg);

	return count;
}
DEFINE_PMC_CORE_ATTR_WRITE(pmc_core_lpm_latch_mode);
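
/*
 * Usage sketch (illustrative): any enabled substate name selects
 * latch-on-mode-entry, e.g.
 *	echo "S0i2.0" > /sys/kernel/debug/pmc_core/lpm_latch_mode
 * while "c10" restores the default latch-on-C10-exit behavior and "clear"
 * clears the latched events through the ETR3 register, as handled above.
 */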

static int pmc_core_pkgc_show(struct seq_file *s, void *unused)
{
	struct pmc *pmc = s->private;
	const struct pmc_bit_map *map = pmc->map->msr_sts;
	u64 pcstate_count;
	unsigned int index;

	for (index = 0; map[index].name; index++) {
		if (rdmsrl_safe(map[index].bit_mask, &pcstate_count))
			continue;

		pcstate_count *= 1000;
		do_div(pcstate_count, tsc_khz);
		seq_printf(s, "%-8s : %llu\n", map[index].name,
			   pcstate_count);
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(pmc_core_pkgc);
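
/*
 * Unit note (added for clarity): the package C-state residency MSRs are
 * assumed to count at TSC frequency, so count * 1000 / tsc_khz converts
 * the raw reading to microseconds.
 */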

static bool pmc_core_pri_verify(u32 lpm_pri, u8 *mode_order)
{
	unsigned int i, j;

	if (!lpm_pri)
		return false;
	/*
	 * Each byte contains the priority level for 2 modes (7:4 and 3:0).
	 * In a 32 bit register this allows for describing 8 modes. Store the
	 * levels and look for values out of range.
	 */
	for (i = 0; i < 8; i++) {
		int level = lpm_pri & GENMASK(3, 0);

		if (level >= LPM_MAX_NUM_MODES)
			return false;

		mode_order[i] = level;
		lpm_pri >>= 4;
	}

	/* Check that we have unique values */
	for (i = 0; i < LPM_MAX_NUM_MODES - 1; i++)
		for (j = i + 1; j < LPM_MAX_NUM_MODES; j++)
			if (mode_order[i] == mode_order[j])
				return false;

	return true;
}
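
/*
 * Worked example (illustrative): lpm_pri = 0x76543210 stores the priority
 * level of mode i in nibble i, giving mode_order = {0, 1, 2, 3, 4, 5, 6, 7},
 * which passes verification; any repeated or out-of-range nibble fails.
 */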

void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev)
{
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	u8 pri_order[LPM_MAX_NUM_MODES] = LPM_DEFAULT_PRI;
	u8 mode_order[LPM_MAX_NUM_MODES];
	u32 lpm_pri;
	u32 lpm_en;
	unsigned int i;
	int mode, p;

	/* Use LPM Maps to indicate support for substates */
	if (!pmc->map->lpm_num_maps)
		return;

	lpm_en = pmc_core_reg_read(pmc, pmc->map->lpm_en_offset);
	/*
	 * For MTL, BIT 31 is not an lpm mode but an enable bit. The lower
	 * byte is enough to cover the number of lpm modes for all platforms,
	 * so mask off the upper three bytes.
	 */
	pmcdev->num_lpm_modes = hweight32(lpm_en & 0xFF);

	/* Read 32 bit LPM_PRI register */
	lpm_pri = pmc_core_reg_read(pmc, pmc->map->lpm_priority_offset);

	/*
	 * If lpm_pri value passes verification, then override the default
	 * modes here. Otherwise stick with the default.
	 */
	if (pmc_core_pri_verify(lpm_pri, mode_order)) {
		/* Get list of modes in priority order */
		for (mode = 0; mode < LPM_MAX_NUM_MODES; mode++)
			pri_order[mode_order[mode]] = mode;
	} else {
		dev_warn(&pmcdev->pdev->dev,
			 "Assuming a default substate order for this platform\n");
	}

	/*
	 * Loop through all modes from lowest to highest priority,
	 * and capture all enabled modes in order
	 */
	i = 0;
	for (p = LPM_MAX_NUM_MODES - 1; p >= 0; p--) {
		int mode = pri_order[p];

		if (!(BIT(mode) & lpm_en))
			continue;

		pmcdev->lpm_en_modes[i++] = mode;
	}
}

int get_primary_reg_base(struct pmc *pmc)
{
	u64 slp_s0_addr;

	if (lpit_read_residency_count_address(&slp_s0_addr)) {
		pmc->base_addr = PMC_BASE_ADDR_DEFAULT;

		if (page_is_ram(PHYS_PFN(pmc->base_addr)))
			return -ENODEV;
	} else {
		pmc->base_addr = slp_s0_addr - pmc->map->slp_s0_offset;
	}

	pmc->regbase = ioremap(pmc->base_addr, pmc->map->regmap_length);
	if (!pmc->regbase)
		return -ENOMEM;
	return 0;
}

void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
{
	struct telem_endpoint *ep;
	struct pci_dev *pcidev;

	pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(10, 0));
	if (!pcidev) {
		dev_err(&pmcdev->pdev->dev, "PUNIT PMT device not found.");
		return;
	}

	ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
	pci_dev_put(pcidev);
	if (IS_ERR(ep)) {
		dev_err(&pmcdev->pdev->dev,
			"pmc_core: couldn't get DMU telem endpoint %ld",
			PTR_ERR(ep));
		return;
	}

	pmcdev->punit_ep = ep;

	pmcdev->has_die_c6 = true;
	pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET;
}

void pmc_core_set_device_d3(unsigned int device)
{
	struct pci_dev *pcidev;

	pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (pcidev) {
		if (!device_trylock(&pcidev->dev)) {
			pci_dev_put(pcidev);
			return;
		}
		if (!pcidev->dev.driver) {
			dev_info(&pcidev->dev, "Setting to D3hot\n");
			pci_set_power_state(pcidev, PCI_D3hot);
		}
		device_unlock(&pcidev->dev);
		pci_dev_put(pcidev);
	}
}

static bool pmc_core_is_pson_residency_enabled(struct pmc_dev *pmcdev)
{
	struct platform_device *pdev = pmcdev->pdev;
	struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
	u8 val;

	if (!adev)
		return false;

	if (fwnode_property_read_u8(acpi_fwnode_handle(adev),
				    "intel-cec-pson-switching-enabled-in-s0",
				    &val))
		return false;

	return val == 1;
}

static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
{
	debugfs_remove_recursive(pmcdev->dbgfs_dir);
}

static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
{
	struct pmc *primary_pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	struct dentry *dir;

	dir = debugfs_create_dir("pmc_core", NULL);
	pmcdev->dbgfs_dir = dir;

	debugfs_create_file("slp_s0_residency_usec", 0444, dir, primary_pmc,
			    &pmc_core_dev_state);

	if (primary_pmc->map->pfear_sts)
		debugfs_create_file("pch_ip_power_gating_status", 0444, dir,
				    pmcdev, &pmc_core_ppfear_fops);

	debugfs_create_file("ltr_ignore", 0644, dir, pmcdev,
			    &pmc_core_ltr_ignore_fops);

	debugfs_create_file("ltr_restore", 0200, dir, pmcdev, &pmc_core_ltr_restore_fops);

	debugfs_create_file("ltr_show", 0444, dir, pmcdev, &pmc_core_ltr_fops);

	if (primary_pmc->map->s0ix_blocker_maps)
		debugfs_create_file("s0ix_blocker", 0444, dir, pmcdev, &pmc_core_s0ix_blocker_fops);

	debugfs_create_file("package_cstate_show", 0444, dir, primary_pmc,
			    &pmc_core_pkgc_fops);

	if (primary_pmc->map->pll_sts)
		debugfs_create_file("pll_status", 0444, dir, pmcdev,
				    &pmc_core_pll_fops);

	if (primary_pmc->map->mphy_sts)
		debugfs_create_file("mphy_core_lanes_power_gating_status",
				    0444, dir, pmcdev,
				    &pmc_core_mphy_pg_fops);

	if (primary_pmc->map->slps0_dbg_maps) {
		debugfs_create_file("slp_s0_debug_status", 0444,
				    dir, pmcdev,
				    &pmc_core_slps0_dbg_fops);

		debugfs_create_bool("slp_s0_dbg_latch", 0644,
				    dir, &slps0_dbg_latch);
	}

	if (primary_pmc->map->lpm_en_offset) {
		debugfs_create_file("substate_residencies", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_res_fops);
	}

	if (primary_pmc->map->lpm_status_offset) {
		debugfs_create_file("substate_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_sts_regs_fops);
		debugfs_create_file("substate_live_status_registers", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_l_sts_regs_fops);
		debugfs_create_file("lpm_latch_mode", 0644,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_lpm_latch_mode_fops);
	}

	if (primary_pmc->lpm_req_regs) {
		debugfs_create_file("substate_requirements", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_substate_req_regs_fops);
	}

	if (primary_pmc->map->pson_residency_offset && pmc_core_is_pson_residency_enabled(pmcdev)) {
		debugfs_create_file("pson_residency_usec", 0444,
				    pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency);
	}

	if (pmcdev->has_die_c6) {
		debugfs_create_file("die_c6_us_show", 0444,
				    pmcdev->dbgfs_dir, pmcdev,
				    &pmc_core_die_c6_us_fops);
	}
}

static const struct x86_cpu_id intel_pmc_core_ids[] = {
	X86_MATCH_VFM(INTEL_SKYLAKE_L,		spt_core_init),
	X86_MATCH_VFM(INTEL_SKYLAKE,		spt_core_init),
	X86_MATCH_VFM(INTEL_KABYLAKE_L,		spt_core_init),
	X86_MATCH_VFM(INTEL_KABYLAKE,		spt_core_init),
	X86_MATCH_VFM(INTEL_CANNONLAKE_L,	cnp_core_init),
	X86_MATCH_VFM(INTEL_ICELAKE_L,		icl_core_init),
	X86_MATCH_VFM(INTEL_ICELAKE_NNPI,	icl_core_init),
	X86_MATCH_VFM(INTEL_COMETLAKE,		cnp_core_init),
	X86_MATCH_VFM(INTEL_COMETLAKE_L,	cnp_core_init),
	X86_MATCH_VFM(INTEL_TIGERLAKE_L,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_TIGERLAKE,		tgl_core_init),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_ATOM_TREMONT_L,	icl_core_init),
	X86_MATCH_VFM(INTEL_ROCKETLAKE,		tgl_core_init),
	X86_MATCH_VFM(INTEL_ALDERLAKE_L,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_ALDERLAKE,		adl_core_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,	tgl_l_core_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE,		adl_core_init),
	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,	adl_core_init),
	X86_MATCH_VFM(INTEL_METEORLAKE_L,	mtl_core_init),
	X86_MATCH_VFM(INTEL_ARROWLAKE,		arl_core_init),
	X86_MATCH_VFM(INTEL_LUNARLAKE_M,	lnl_core_init),
	{}
};

MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids);

static const struct pci_device_id pmc_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) },
	{ }
};

/*
 * This quirk can be used on those platforms where the platform BIOS
 * enforces a 24 MHz crystal shutdown before the PMC can assert SLP_S0#.
 */
static bool xtal_ignore;
static int quirk_xtal_ignore(const struct dmi_system_id *id)
{
	xtal_ignore = true;
	return 0;
}

static void pmc_core_xtal_ignore(struct pmc *pmc)
{
	u32 value;

	value = pmc_core_reg_read(pmc, pmc->map->pm_vric1_offset);
	/* 24MHz Crystal Shutdown Qualification Disable */
	value |= SPT_PMC_VRIC1_XTALSDQDIS;
	/* Low Voltage Mode Enable */
	value &= ~SPT_PMC_VRIC1_SLPS0LVEN;
	pmc_core_reg_write(pmc, pmc->map->pm_vric1_offset, value);
}

static const struct dmi_system_id pmc_core_dmi_table[] = {
	{
	.callback = quirk_xtal_ignore,
	.ident = "HP Elite x2 1013 G3",
	.matches = {
		DMI_MATCH(DMI_SYS_VENDOR, "HP"),
		DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite x2 1013 G3"),
		},
	},
	{}
};

static void pmc_core_do_dmi_quirks(struct pmc *pmc)
{
	dmi_check_system(pmc_core_dmi_table);

	if (xtal_ignore)
		pmc_core_xtal_ignore(pmc);
}

static void pmc_core_clean_structure(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];

		if (pmc)
			iounmap(pmc->regbase);
	}

	if (pmcdev->ssram_pcidev) {
		pci_dev_put(pmcdev->ssram_pcidev);
		pci_disable_device(pmcdev->ssram_pcidev);
	}

	if (pmcdev->punit_ep)
		pmt_telem_unregister_endpoint(pmcdev->punit_ep);

	platform_set_drvdata(pdev, NULL);
	mutex_destroy(&pmcdev->lock);
}

static int pmc_core_probe(struct platform_device *pdev)
{
	static bool device_initialized;
	struct pmc_dev *pmcdev;
	const struct x86_cpu_id *cpu_id;
	int (*core_init)(struct pmc_dev *pmcdev);
	struct pmc *primary_pmc;
	int ret;

	if (device_initialized)
		return -ENODEV;

	pmcdev = devm_kzalloc(&pdev->dev, sizeof(*pmcdev), GFP_KERNEL);
	if (!pmcdev)
		return -ENOMEM;

	pmcdev->crystal_freq = pmc_core_get_crystal_freq();

	platform_set_drvdata(pdev, pmcdev);
	pmcdev->pdev = pdev;

	cpu_id = x86_match_cpu(intel_pmc_core_ids);
	if (!cpu_id)
		return -ENODEV;

	core_init = (int (*)(struct pmc_dev *))cpu_id->driver_data;

	/* Primary PMC */
	primary_pmc = devm_kzalloc(&pdev->dev, sizeof(*primary_pmc), GFP_KERNEL);
	if (!primary_pmc)
		return -ENOMEM;
	pmcdev->pmcs[PMC_IDX_MAIN] = primary_pmc;

	/* The last element in msr_map is empty */
	pmcdev->num_of_pkgc = ARRAY_SIZE(msr_map) - 1;
	pmcdev->pkgc_res_cnt = devm_kcalloc(&pdev->dev,
					    pmcdev->num_of_pkgc,
					    sizeof(*pmcdev->pkgc_res_cnt),
					    GFP_KERNEL);
	if (!pmcdev->pkgc_res_cnt)
		return -ENOMEM;

	/*
	 * Coffee Lake has CPU ID of Kaby Lake and Cannon Lake PCH. So here
	 * Sunrisepoint PCH regmap can't be used. Use Cannon Lake PCH regmap
	 * in this case.
	 */
	if (core_init == spt_core_init && !pci_dev_present(pmc_pci_ids))
		core_init = cnp_core_init;

	mutex_init(&pmcdev->lock);
	ret = core_init(pmcdev);
	if (ret) {
		pmc_core_clean_structure(pdev);
		return ret;
	}

	pmcdev->pmc_xram_read_bit = pmc_core_check_read_lock_bit(primary_pmc);
	pmc_core_do_dmi_quirks(primary_pmc);

	pmc_core_dbgfs_register(pmcdev);
	pm_report_max_hw_sleep(FIELD_MAX(SLP_S0_RES_COUNTER_MASK) *
			       pmc_core_adjust_slp_s0_step(primary_pmc, 1));

	device_initialized = true;
	dev_info(&pdev->dev, " initialized\n");

	return 0;
}

static void pmc_core_remove(struct platform_device *pdev)
{
	struct pmc_dev *pmcdev = platform_get_drvdata(pdev);

	pmc_core_dbgfs_unregister(pmcdev);
	pmc_core_clean_structure(pdev);
}

static bool warn_on_s0ix_failures;
module_param(warn_on_s0ix_failures, bool, 0644);
MODULE_PARM_DESC(warn_on_s0ix_failures, "Check and warn for S0ix failures");

static bool ltr_ignore_all_suspend = true;
module_param(ltr_ignore_all_suspend, bool, 0644);
MODULE_PARM_DESC(ltr_ignore_all_suspend, "Ignore all LTRs during suspend");

static __maybe_unused int pmc_core_suspend(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	unsigned int i;

	if (pmcdev->suspend)
		pmcdev->suspend(pmcdev);

	if (ltr_ignore_all_suspend)
		pmc_core_ltr_ignore_all(pmcdev);

	/* Check if the suspend will actually use S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	/* Save PKGC residency for checking later */
	for (i = 0; i < pmcdev->num_of_pkgc; i++) {
		if (rdmsrl_safe(msr_map[i].bit_mask, &pmcdev->pkgc_res_cnt[i]))
			return -EIO;
	}

	/* Save S0ix residency for checking later */
	if (pmc_core_dev_state_get(pmc, &pmcdev->s0ix_counter))
		return -EIO;

	return 0;
}

static inline bool pmc_core_is_deepest_pkgc_failed(struct pmc_dev *pmcdev)
{
	u32 deepest_pkgc_msr = msr_map[pmcdev->num_of_pkgc - 1].bit_mask;
	u64 deepest_pkgc_residency;

	if (rdmsrl_safe(deepest_pkgc_msr, &deepest_pkgc_residency))
		return false;

	if (deepest_pkgc_residency == pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1])
		return true;

	return false;
}

static inline bool pmc_core_is_s0ix_failed(struct pmc_dev *pmcdev)
{
	u64 s0ix_counter;

	if (pmc_core_dev_state_get(pmcdev->pmcs[PMC_IDX_MAIN], &s0ix_counter))
		return false;

	pm_report_hw_sleep_time((u32)(s0ix_counter - pmcdev->s0ix_counter));

	if (s0ix_counter == pmcdev->s0ix_counter)
		return true;

	return false;
}

int pmc_core_resume_common(struct pmc_dev *pmcdev)
{
	struct device *dev = &pmcdev->pdev->dev;
	struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
	const struct pmc_bit_map **maps = pmc->map->lpm_sts;
	int offset = pmc->map->lpm_status_offset;
	unsigned int i;

	/* Check if the suspend used S0ix */
	if (pm_suspend_via_firmware())
		return 0;

	if (!pmc_core_is_s0ix_failed(pmcdev))
		return 0;

	if (!warn_on_s0ix_failures)
		return 0;

	if (pmc_core_is_deepest_pkgc_failed(pmcdev)) {
		/* S0ix failed because of deepest PKGC entry failure */
		dev_info(dev, "CPU did not enter %s!!! (%s cnt=0x%llx)\n",
			 msr_map[pmcdev->num_of_pkgc - 1].name,
			 msr_map[pmcdev->num_of_pkgc - 1].name,
			 pmcdev->pkgc_res_cnt[pmcdev->num_of_pkgc - 1]);

		for (i = 0; i < pmcdev->num_of_pkgc; i++) {
			u64 pc_cnt;

			if (!rdmsrl_safe(msr_map[i].bit_mask, &pc_cnt)) {
				dev_info(dev, "Prev %s cnt = 0x%llx, Current %s cnt = 0x%llx\n",
					 msr_map[i].name, pmcdev->pkgc_res_cnt[i],
					 msr_map[i].name, pc_cnt);
			}
		}
		return 0;
	}

	/* The really interesting case - S0ix failed - let's ask the PMC why. */
	dev_warn(dev, "CPU did not enter SLP_S0!!! (S0ix cnt=%llu)\n",
		 pmcdev->s0ix_counter);

	if (pmc->map->slps0_dbg_maps)
		pmc_core_slps0_display(pmc, dev, NULL);

	for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
		struct pmc *pmc = pmcdev->pmcs[i];

		if (!pmc)
			continue;
		if (pmc->map->lpm_sts)
			pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
	}

	return 0;
}

static __maybe_unused int pmc_core_resume(struct device *dev)
{
	struct pmc_dev *pmcdev = dev_get_drvdata(dev);

	if (ltr_ignore_all_suspend)
		pmc_core_ltr_restore_all(pmcdev);

	if (pmcdev->resume)
		return pmcdev->resume(pmcdev);

	return pmc_core_resume_common(pmcdev);
}

static const struct dev_pm_ops pmc_core_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(pmc_core_suspend, pmc_core_resume)
};

static const struct acpi_device_id pmc_core_acpi_ids[] = {
	{"INT33A1", 0}, /* _HID for Intel Power Engine, _CID PNP0D80 */
	{ }
};
MODULE_DEVICE_TABLE(acpi, pmc_core_acpi_ids);

static struct platform_driver pmc_core_driver = {
	.driver = {
		.name = "intel_pmc_core",
		.acpi_match_table = ACPI_PTR(pmc_core_acpi_ids),
		.pm = &pmc_core_pm_ops,
		.dev_groups = pmc_dev_groups,
	},
	.probe = pmc_core_probe,
	.remove = pmc_core_remove,
};

module_platform_driver(pmc_core_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel PMC Core Driver");