#ifdef CONFIG_CPU_SUP_INTEL
enum {
	LBR_FORMAT_32		= 0x00,
	LBR_FORMAT_LIP		= 0x01,
	LBR_FORMAT_EIP		= 0x02,
	LBR_FORMAT_EIP_FLAGS	= 0x03,
};
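
/*
 * These encodings match the LBR format field (bits 5:0) of
 * IA32_PERF_CAPABILITIES, which is presumably where intel_cap.lbr_format
 * comes from: 0x00 is the legacy format with the 32-bit FROM and TO
 * addresses packed into one MSR per entry, 0x01/0x02 are 64-bit LIP/EIP
 * records, and 0x03 is a 64-bit EIP record that also carries flag bits
 * such as the misprediction flag used further down.
 */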
/*
 * We only support LBR implementations that have FREEZE_LBRS_ON_PMI,
 * otherwise it becomes near impossible to get a reliable stack.
 */
static void __intel_pmu_lbr_enable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl |= (DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void __intel_pmu_lbr_disable(void)
{
	u64 debugctl;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI);
	wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
}
static void intel_pmu_lbr_reset_32(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++)
		wrmsrl(x86_pmu.lbr_from + i, 0);
}
static void intel_pmu_lbr_reset_64(void)
{
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		wrmsrl(x86_pmu.lbr_from + i, 0);
		wrmsrl(x86_pmu.lbr_to   + i, 0);
	}
}
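
/*
 * The 32-bit reset only clears the lbr_from bank because the 32-bit LBR
 * format packs both the FROM and the TO address into a single MSR per
 * entry; the 64-bit formats keep them in separate FROM/TO banks, hence
 * the extra wrmsrl() in the 64-bit variant.
 */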
static void intel_pmu_lbr_reset(void)
{
	if (!x86_pmu.lbr_nr)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_reset_32();
	else
		intel_pmu_lbr_reset_64();
}
static void intel_pmu_lbr_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	WARN_ON_ONCE(cpuc->enabled);

	/*
	 * Reset the LBR stack if we changed task context to
	 * avoid data leaks.
	 */
	if (event->ctx->task && cpuc->lbr_context != event->ctx) {
		intel_pmu_lbr_reset();
		cpuc->lbr_context = event->ctx;
	}

	cpuc->lbr_users++;
}
static void intel_pmu_lbr_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu.lbr_nr)
		return;

	cpuc->lbr_users--;
	WARN_ON_ONCE(cpuc->lbr_users < 0);

	if (cpuc->enabled && !cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}
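
/*
 * cpuc->lbr_users counts the scheduled events on this cpu that want branch
 * records. The DEBUGCTL writes are gated on that count: the ..._all()
 * helpers below only touch the MSR while at least one user exists, and the
 * disable path above turns recording off when the last user goes away.
 */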
static void intel_pmu_lbr_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_enable();
}
static void intel_pmu_lbr_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->lbr_users)
		__intel_pmu_lbr_disable();
}
static inline u64 intel_pmu_lbr_tos(void)
{
	u64 tos;

	rdmsrl(x86_pmu.lbr_tos, tos);

	return tos;
}
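
/*
 * The TOS MSR holds the index of the most recently written LBR entry; the
 * read routines below walk backwards from it, masking with lbr_nr - 1,
 * which assumes a power-of-two stack depth (true for the depths used by
 * the init routines at the end of this file).
 */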
static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		/* 32-bit format: FROM in the low half, TO in the high half */
		union {
			struct {
				u32 from;
				u32 to;
			};
			u64     lbr;
		} msr_lastbranch;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, msr_lastbranch.lbr);

		cpuc->lbr_entries[i].from  = msr_lastbranch.from;
		cpuc->lbr_entries[i].to    = msr_lastbranch.to;
		cpuc->lbr_entries[i].flags = 0;
	}
	cpuc->lbr_stack.nr = i;
}
#define LBR_FROM_FLAG_MISPRED	(1ULL << 63)
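
/*
 * In the EIP_FLAGS format, bit 63 of the FROM MSR is not part of the
 * branch address: it records whether the branch was mispredicted.
 * intel_pmu_lbr_read_64() below strips it before reporting the address.
 */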
/*
 * Due to lack of segmentation in Linux the effective address (offset)
 * is the same as the linear address, allowing us to merge the LIP and EIP
 * LBR formats.
 */
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
	unsigned long mask = x86_pmu.lbr_nr - 1;
	int lbr_format = x86_pmu.intel_cap.lbr_format;
	u64 tos = intel_pmu_lbr_tos();
	int i;

	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		unsigned long lbr_idx = (tos - i) & mask;
		u64 from, to, flags = 0;

		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);

		if (lbr_format == LBR_FORMAT_EIP_FLAGS) {
			flags = !!(from & LBR_FROM_FLAG_MISPRED);
			/* strip the flag bit, sign-extending from bit 62 */
			from = (u64)((((s64)from) << 1) >> 1);
		}

		cpuc->lbr_entries[i].from  = from;
		cpuc->lbr_entries[i].to    = to;
		cpuc->lbr_entries[i].flags = flags;
	}
	cpuc->lbr_stack.nr = i;
}
static void intel_pmu_lbr_read(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!cpuc->lbr_users)
		return;

	if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32)
		intel_pmu_lbr_read_32(cpuc);
	else
		intel_pmu_lbr_read_64(cpuc);
}
static void intel_pmu_lbr_init_core(void)
{
	x86_pmu.lbr_nr     = 4;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x40;
	x86_pmu.lbr_to     = 0x60;
}
static void intel_pmu_lbr_init_nhm(void)
{
	x86_pmu.lbr_nr     = 16;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x680;
	x86_pmu.lbr_to     = 0x6c0;
}
static void intel_pmu_lbr_init_atom(void)
{
	x86_pmu.lbr_nr     = 8;
	x86_pmu.lbr_tos    = 0x01c9;
	x86_pmu.lbr_from   = 0x40;
	x86_pmu.lbr_to     = 0x60;
}
#endif /* CONFIG_CPU_SUP_INTEL */