/*
 * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
 * Copyright (c) 2017, Intel Corporation.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/kernel.h>
21 #include <linux/init.h>
22 #include <linux/topology.h>
23 #include <linux/workqueue.h>
24 #include <linux/cpuhotplug.h>
25 #include <linux/cpufeature.h>
26 #include <asm/cpu_device_id.h>
27 #include <asm/intel-family.h>
29 #define MSR_OC_MAILBOX 0x150
30 #define MSR_OC_MAILBOX_CMD_OFFSET 32
31 #define MSR_OC_MAILBOX_RSP_OFFSET 32
32 #define MSR_OC_MAILBOX_BUSY_BIT 63
33 #define OC_MAILBOX_FC_CONTROL_CMD 0x1C
/*
 * Typical latency to get mail box response is ~3us, It takes +3 us to
 * process reading mailbox after issuing mailbox write on a Broadwell 3.4 GHz
 * system. So for most of the time, the first mailbox read should have the
 * response, but to avoid some boundary cases retry twice.
 */
41 #define OC_MAILBOX_RETRY_COUNT 2
43 static int get_oc_core_priority(unsigned int cpu
)
45 u64 value
, cmd
= OC_MAILBOX_FC_CONTROL_CMD
;
48 /* Issue favored core read command */
49 value
= cmd
<< MSR_OC_MAILBOX_CMD_OFFSET
;
50 /* Set the busy bit to indicate OS is trying to issue command */
51 value
|= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT
);
52 ret
= wrmsrl_safe(MSR_OC_MAILBOX
, value
);
54 pr_debug("cpu %d OC mailbox write failed\n", cpu
);
58 for (i
= 0; i
< OC_MAILBOX_RETRY_COUNT
; ++i
) {
59 ret
= rdmsrl_safe(MSR_OC_MAILBOX
, &value
);
61 pr_debug("cpu %d OC mailbox read failed\n", cpu
);
65 if (value
& BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT
)) {
66 pr_debug("cpu %d OC mailbox still processing\n", cpu
);
71 if ((value
>> MSR_OC_MAILBOX_RSP_OFFSET
) & 0xff) {
72 pr_debug("cpu %d OC mailbox cmd failed\n", cpu
);
78 pr_debug("cpu %d max_ratio %d\n", cpu
, ret
);
/*
 * The work item is needed to avoid CPU hotplug locking issues. The function
 * itmt_legacy_set_priority() is called from CPU online callback, so can't
 * call sched_set_itmt_support() from there as this function will acquire
 * hotplug locks in its path.
 */
/*
 * Enable ITMT support from process context via a work item: the CPU
 * online callback cannot call sched_set_itmt_support() directly, as that
 * function takes CPU hotplug locks on its path.
 */
static void itmt_legacy_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);
98 static int itmt_legacy_cpu_online(unsigned int cpu
)
100 static u32 max_highest_perf
= 0, min_highest_perf
= U32_MAX
;
103 priority
= get_oc_core_priority(cpu
);
107 sched_set_itmt_core_prio(priority
, cpu
);
109 /* Enable ITMT feature when a core with different priority is found */
110 if (max_highest_perf
<= min_highest_perf
) {
111 if (priority
> max_highest_perf
)
112 max_highest_perf
= priority
;
114 if (priority
< min_highest_perf
)
115 min_highest_perf
= priority
;
117 if (max_highest_perf
> min_highest_perf
)
118 schedule_work(&sched_itmt_work
);
124 #define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, }
126 static const struct x86_cpu_id itmt_legacy_cpu_ids
[] = {
127 ICPU(INTEL_FAM6_BROADWELL_X
),
128 ICPU(INTEL_FAM6_SKYLAKE_X
),
132 static int __init
itmt_legacy_init(void)
134 const struct x86_cpu_id
*id
;
137 id
= x86_match_cpu(itmt_legacy_cpu_ids
);
141 ret
= cpuhp_setup_state(CPUHP_AP_ONLINE_DYN
,
142 "platform/x86/turbo_max_3:online",
143 itmt_legacy_cpu_online
, NULL
);
149 late_initcall(itmt_legacy_init
)