/*
 * arch/x86/kernel/cpu/perf_event_intel_lbr.c
 * (source recovered from a gitweb blob dump; navigation lines
 *  "Merge branch 'fixes-modulesplit' into fixes" and blob hash
 *  3fab3de3ce96dde8b3bece9bdf65cae8c5b1e466 preserved here as a comment)
 */
1 #include <linux/perf_event.h>
2 #include <linux/types.h>
4 #include <asm/perf_event.h>
5 #include <asm/msr.h>
7 #include "perf_event.h"
/*
 * LBR record formats, as consumed from x86_pmu.intel_cap.lbr_format.
 * The format decides both the MSR layout of an entry and whether a
 * mispredict flag is encoded in bit 63 of the 'from' address.
 */
enum {
	LBR_FORMAT_32		= 0x00,	/* from/to packed as two u32s in one MSR */
	LBR_FORMAT_LIP		= 0x01,	/* linear instruction pointer */
	LBR_FORMAT_EIP		= 0x02,	/* effective instruction pointer */
	LBR_FORMAT_EIP_FLAGS	= 0x03,	/* EIP + mispredict flag in from[63] */
};
17 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI
18 * otherwise it becomes near impossible to get a reliable stack.
21 static void __intel_pmu_lbr_enable(void)
23 u64 debugctl;
25 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
26 debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
27 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
30 static void __intel_pmu_lbr_disable(void)
32 u64 debugctl;
34 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
35 debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
36 wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
39 static void intel_pmu_lbr_reset_32(void)
41 int i;
43 for (i = 0; i < x86_pmu.lbr_nr; i++)
44 wrmsrl(x86_pmu.lbr_from + i, 0);
47 static void intel_pmu_lbr_reset_64(void)
49 int i;
51 for (i = 0; i < x86_pmu.lbr_nr; i++) {
52 wrmsrl(x86_pmu.lbr_from + i, 0);
53 wrmsrl(x86_pmu.lbr_to + i, 0);
57 void intel_pmu_lbr_reset(void)
59 if (!x86_pmu.lbr_nr)
60 return;
62 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
63 intel_pmu_lbr_reset_32();
64 else
65 intel_pmu_lbr_reset_64();
68 void intel_pmu_lbr_enable(struct perf_event *event)
70 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
72 if (!x86_pmu.lbr_nr)
73 return;
75 WARN_ON_ONCE(cpuc->enabled);
78 * Reset the LBR stack if we changed task context to
79 * avoid data leaks.
82 if (event->ctx->task && cpuc->lbr_context != event->ctx) {
83 intel_pmu_lbr_reset();
84 cpuc->lbr_context = event->ctx;
87 cpuc->lbr_users++;
90 void intel_pmu_lbr_disable(struct perf_event *event)
92 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
94 if (!x86_pmu.lbr_nr)
95 return;
97 cpuc->lbr_users--;
98 WARN_ON_ONCE(cpuc->lbr_users < 0);
100 if (cpuc->enabled && !cpuc->lbr_users)
101 __intel_pmu_lbr_disable();
104 void intel_pmu_lbr_enable_all(void)
106 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
108 if (cpuc->lbr_users)
109 __intel_pmu_lbr_enable();
112 void intel_pmu_lbr_disable_all(void)
114 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
116 if (cpuc->lbr_users)
117 __intel_pmu_lbr_disable();
120 static inline u64 intel_pmu_lbr_tos(void)
122 u64 tos;
124 rdmsrl(x86_pmu.lbr_tos, tos);
126 return tos;
129 static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
131 unsigned long mask = x86_pmu.lbr_nr - 1;
132 u64 tos = intel_pmu_lbr_tos();
133 int i;
135 for (i = 0; i < x86_pmu.lbr_nr; i++) {
136 unsigned long lbr_idx = (tos - i) & mask;
137 union {
138 struct {
139 u32 from;
140 u32 to;
142 u64 lbr;
143 } msr_lastbranch;
145 rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);
147 cpuc->lbr_entries[i].from = msr_lastbranch.from;
148 cpuc->lbr_entries[i].to = msr_lastbranch.to;
149 cpuc->lbr_entries[i].flags = 0;
151 cpuc->lbr_stack.nr = i;
154 #define LBR_FROM_FLAG_MISPRED (1ULL << 63)
157 * Due to lack of segmentation in Linux the effective address (offset)
158 * is the same as the linear address, allowing us to merge the LIP and EIP
159 * LBR formats.
161 static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
163 unsigned long mask = x86_pmu.lbr_nr - 1;
164 int lbr_format = x86_pmu.intel_cap.lbr_format;
165 u64 tos = intel_pmu_lbr_tos();
166 int i;
168 for (i = 0; i < x86_pmu.lbr_nr; i++) {
169 unsigned long lbr_idx = (tos - i) & mask;
170 u64 from, to, flags = 0;
172 rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
173 rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
175 if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
176 flags = !!(from & LBR_FROM_FLAG_MISPRED);
177 from = (u64)((((s64)from) << 1) >> 1);
180 cpuc->lbr_entries[i].from = from;
181 cpuc->lbr_entries[i].to = to;
182 cpuc->lbr_entries[i].flags = flags;
184 cpuc->lbr_stack.nr = i;
187 void intel_pmu_lbr_read(void)
189 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
191 if (!cpuc->lbr_users)
192 return;
194 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
195 intel_pmu_lbr_read_32(cpuc);
196 else
197 intel_pmu_lbr_read_64(cpuc);
200 void intel_pmu_lbr_init_core(void)
202 x86_pmu.lbr_nr = 4;
203 x86_pmu.lbr_tos = 0x01c9;
204 x86_pmu.lbr_from = 0x40;
205 x86_pmu.lbr_to = 0x60;
208 void intel_pmu_lbr_init_nhm(void)
210 x86_pmu.lbr_nr = 16;
211 x86_pmu.lbr_tos = 0x01c9;
212 x86_pmu.lbr_from = 0x680;
213 x86_pmu.lbr_to = 0x6c0;
216 void intel_pmu_lbr_init_atom(void)
218 x86_pmu.lbr_nr = 8;
219 x86_pmu.lbr_tos = 0x01c9;
220 x86_pmu.lbr_from = 0x40;
221 x86_pmu.lbr_to = 0x60;