[linux/fpc-iii.git] drivers/platform/x86/intel_turbo_max_3.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
 * Copyright (c) 2017, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */
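/* Prefix pr_debug() and friends in this file with the module name */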
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#define MSR_OC_MAILBOX                  0x150
#define MSR_OC_MAILBOX_CMD_OFFSET       32
#define MSR_OC_MAILBOX_RSP_OFFSET       32
#define MSR_OC_MAILBOX_BUSY_BIT         63
#define OC_MAILBOX_FC_CONTROL_CMD       0x1C
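/*
 * Mailbox layout as used below: the command byte is written at bits 39:32
 * (MSR_OC_MAILBOX_CMD_OFFSET), bit 63 is the busy flag, the response code
 * is read back from bits 39:32 (MSR_OC_MAILBOX_RSP_OFFSET), and the
 * favored-core ratio is returned in bits 7:0.
 */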
/*
 * The typical latency to get a mailbox response is ~3 us, and it takes a
 * further ~3 us to process the mailbox read after issuing the mailbox
 * write on a Broadwell 3.4 GHz system. So most of the time the first
 * mailbox read should already have the response, but retry twice to cover
 * boundary cases.
 */
#define OC_MAILBOX_RETRY_COUNT          2
static int get_oc_core_priority(unsigned int cpu)
{
        u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
        int ret, i;

        /* Issue favored core read command */
        value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
        /* Set the busy bit to indicate OS is trying to issue command */
        value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
        ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
        if (ret) {
                pr_debug("cpu %d OC mailbox write failed\n", cpu);
                return ret;
        }

        for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
                ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
                if (ret) {
                        pr_debug("cpu %d OC mailbox read failed\n", cpu);
                        break;
                }

                if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
                        pr_debug("cpu %d OC mailbox still processing\n", cpu);
                        ret = -EBUSY;
                        continue;
                }

                if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
                        pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
                        ret = -ENXIO;
                        break;
                }

                ret = value & 0xff;
                pr_debug("cpu %d max_ratio %d\n", cpu, ret);
                break;
        }

        return ret;
}
/*
 * The work item is needed to avoid CPU hotplug locking issues. The function
 * itmt_legacy_cpu_online() is called from the CPU online callback, so it
 * can't call sched_set_itmt_support() directly, as that function acquires
 * hotplug locks in its path.
 */
static void itmt_legacy_work_fn(struct work_struct *work)
{
        sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
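/*
 * CPUHP_AP_ONLINE_DYN callbacks run on the CPU that is coming online, so
 * the MSR accesses in get_oc_core_priority() target that CPU. The static
 * max/min trackers below record the spread of priorities seen so far;
 * once they diverge, the cores are known to be asymmetric and ITMT
 * support is scheduled exactly once (afterwards the outer check stays
 * false).
 */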
static int itmt_legacy_cpu_online(unsigned int cpu)
{
        static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
        int priority;

        priority = get_oc_core_priority(cpu);
        if (priority < 0)
                return 0;

        sched_set_itmt_core_prio(priority, cpu);

        /* Enable ITMT feature when a core with different priority is found */
        if (max_highest_perf <= min_highest_perf) {
                if (priority > max_highest_perf)
                        max_highest_perf = priority;

                if (priority < min_highest_perf)
                        min_highest_perf = priority;

                if (max_highest_perf > min_highest_perf)
                        schedule_work(&sched_itmt_work);
        }

        return 0;
}
static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
        {}
};
static int __init itmt_legacy_init(void)
{
        const struct x86_cpu_id *id;
        int ret;

        id = x86_match_cpu(itmt_legacy_cpu_ids);
        if (!id)
                return -ENODEV;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                "platform/x86/turbo_max_3:online",
                                itmt_legacy_cpu_online, NULL);
        if (ret < 0)
                return ret;

        return 0;
}
late_initcall(itmt_legacy_init)
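/*
 * Note: this legacy enumeration path applies only to the non-HWP parts
 * matched above; on HWP-capable systems the favored-core (ITMT)
 * priorities are expected to be set up by the intel_pstate driver
 * instead.
 */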