/*
 * @file op_model_amd.c
 * athlon / K7 / K8 / Family 10h model-specific MSR operations
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon
 * @author Philippe Elie
 * @author Graydon Hoare
 * @author Robert Richter <robert.richter@amd.com>
 * @author Barry Kasindorf <barry.kasindorf@amd.com>
 * @author Jason Yeh <jason.yeh@amd.com>
 * @author Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#include <linux/oprofile.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/percpu.h>

#include <asm/ptrace.h>
#include <asm/msr.h>
#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

#include "op_x86_model.h"
#include "op_counter.h"

#define NUM_COUNTERS		4
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
#define NUM_VIRT_COUNTERS	32
#else
#define NUM_VIRT_COUNTERS	NUM_COUNTERS
#endif

#define OP_EVENT_MASK		0x0FFF
#define OP_CTR_OVERFLOW		(1ULL<<31)

#define MSR_AMD_EVENTSEL_RESERVED	((0xFFFFFCF0ULL<<32)|(1ULL<<21))
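
/*
 * Note: despite the name, MSR_AMD_EVENTSEL_RESERVED is the mask of
 * event-select bits that get *preserved* when a counter is
 * reprogrammed: the setup code does "val &= model->reserved" before
 * OR-ing in the new control bits (see op_amd_setup_ctrs() below).
 */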

static unsigned long reset_value[NUM_VIRT_COUNTERS];

#define IBS_FETCH_SIZE	6
#define IBS_OP_SIZE	12
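
/*
 * Sample payload sizes, apparently counted in 32-bit buffer entries
 * (two per 64-bit value): a fetch sample carries 3 u64 values
 * (6 entries), an op sample 6 u64 values (12 entries); see
 * op_amd_handle_ibs() below.
 */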

static u32 ibs_caps;

struct op_ibs_config {
	unsigned long op_enabled;
	unsigned long fetch_enabled;
	unsigned long max_cnt_fetch;
	unsigned long max_cnt_op;
	unsigned long rand_en;
	unsigned long dispatched_ops;
};

static struct op_ibs_config ibs_config;
static u64 ibs_op_ctl;

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES	0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1LL<<0)
#define IBS_CAPS_RDWROPCNT		(1LL<<3)
#define IBS_CAPS_OPCNT			(1LL<<4)

/*
 * IBS randomization macros
 */
#define IBS_RANDOM_BITS			12
#define IBS_RANDOM_MASK			((1ULL << IBS_RANDOM_BITS) - 1)
#define IBS_RANDOM_MAXCNT_OFFSET	(1ULL << (IBS_RANDOM_BITS - 5))
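
/*
 * IBS_RANDOM_MAXCNT_OFFSET works out to 1 << 7 = 128. This appears to
 * be half of the 12-bit random range (2^12 / 2 = 2048 ops) expressed
 * in IbsOpMaxCnt units of 16 ops (2048 / 16 = 128), matching its use
 * in op_amd_start_ibs() to compensate for the randomized start count.
 */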

static u32 get_ibs_caps(void)
{
	u32 ibs_caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_AVAIL;

	ibs_caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(ibs_caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_AVAIL;

	return ibs_caps;
}

/*
 * 16-bit Linear Feedback Shift Register (LFSR)
 *
 * Feedback polynomial = X^16 + X^14 + X^13 + X^11 + 1
 */
static unsigned int lfsr_random(void)
{
	static unsigned int lfsr_value = 0xF00D;
	unsigned int bit;

	/* Compute next bit to shift in */
	bit = ((lfsr_value >> 0) ^
	       (lfsr_value >> 2) ^
	       (lfsr_value >> 3) ^
	       (lfsr_value >> 5)) & 0x0001;

	/* Advance to next register value */
	lfsr_value = (lfsr_value >> 1) | (bit << 15);

	return lfsr_value;
}
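
/*
 * The taps at bit positions 0, 2, 3 and 5 of the right-shifting
 * register correspond to the polynomial terms X^16, X^14, X^13 and
 * X^11 above (tap = 16 - bit index). Worked example from the seed:
 * lfsr_value = 0xF00D = 1111000000001101b, so bit = 1^1^1^0 = 1 and
 * the first call returns (0xF00D >> 1) | (1 << 15) = 0xF806.
 */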

/*
 * IBS software randomization
 *
 * The IBS periodic op counter is randomized in software. The lower 12
 * bits of the 20 bit counter are randomized. IbsOpCurCnt is
 * initialized with a 12 bit random value.
 */
static inline u64 op_amd_randomize_ibs_op(u64 val)
{
	unsigned int random = lfsr_random();

	if (!(ibs_caps & IBS_CAPS_RDWROPCNT))
		/*
		 * Work around if the hw can not write to IbsOpCurCnt
		 *
		 * Randomize the lower 8 bits of the 16 bit
		 * IbsOpMaxCnt [15:0] value in the range of -128 to
		 * +127 by adding/subtracting an offset to the
		 * maximum count (IbsOpMaxCnt).
		 *
		 * To avoid over or underflows and protect upper bits
		 * starting at bit 16, the initial value for
		 * IbsOpMaxCnt must fit in the range from 0x0081 to
		 * 0xff80.
		 */
		val += (s8)(random >> 4);
	else
		val |= (u64)(random & IBS_RANDOM_MASK) << 32;

	return val;
}
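
/*
 * Two paths, depending on hardware support: with IBS_CAPS_RDWROPCNT a
 * 12-bit random start count lands in the IbsOpCurCnt field at bits
 * [43:32] of the IBSOPCTL value, e.g. random = 0xA5F gives
 * val |= 0xA5F00000000. Without it, the signed-8-bit offset path
 * nudges IbsOpMaxCnt itself, e.g. random = 0xF806 gives
 * (s8)(0xF806 >> 4) = (s8)0x80 = -128.
 */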

static inline void
op_amd_handle_ibs(struct pt_regs * const regs,
		  struct op_msrs const * const msrs)
{
	u64 val, ctl;
	struct op_entry entry;

	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled) {
		rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		if (ctl & IBS_FETCH_VAL) {
			rdmsrl(MSR_AMD64_IBSFETCHLINAD, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_FETCH_CODE, IBS_FETCH_SIZE);
			oprofile_add_data64(&entry, val);
			oprofile_add_data64(&entry, ctl);
			rdmsrl(MSR_AMD64_IBSFETCHPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl &= ~(IBS_FETCH_VAL | IBS_FETCH_CNT);
			ctl |= IBS_FETCH_ENABLE;
			wrmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
		}
	}

	if (ibs_config.op_enabled) {
		rdmsrl(MSR_AMD64_IBSOPCTL, ctl);
		if (ctl & IBS_OP_VAL) {
			rdmsrl(MSR_AMD64_IBSOPRIP, val);
			oprofile_write_reserve(&entry, regs, val,
					       IBS_OP_CODE, IBS_OP_SIZE);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA2, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSOPDATA3, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCLINAD, val);
			oprofile_add_data64(&entry, val);
			rdmsrl(MSR_AMD64_IBSDCPHYSAD, val);
			oprofile_add_data64(&entry, val);
			oprofile_write_commit(&entry);

			/* reenable the IRQ */
			ctl = op_amd_randomize_ibs_op(ibs_op_ctl);
			wrmsrl(MSR_AMD64_IBSOPCTL, ctl);
		}
	}
}

static inline void op_amd_start_ibs(void)
{
	u64 val;

	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled) {
		val = (ibs_config.max_cnt_fetch >> 4) & IBS_FETCH_MAX_CNT;
		val |= ibs_config.rand_en ? IBS_FETCH_RAND_EN : 0;
		val |= IBS_FETCH_ENABLE;
		wrmsrl(MSR_AMD64_IBSFETCHCTL, val);
	}

	if (ibs_config.op_enabled) {
		ibs_op_ctl = ibs_config.max_cnt_op >> 4;
		if (!(ibs_caps & IBS_CAPS_RDWROPCNT)) {
			/*
			 * IbsOpCurCnt not supported. See
			 * op_amd_randomize_ibs_op() for details.
			 */
			ibs_op_ctl = clamp(ibs_op_ctl, 0x0081ULL, 0xFF80ULL);
		} else {
			/*
			 * The start value is randomized with a
			 * positive offset, we need to compensate it
			 * with the half of the randomized range. Also
			 * avoid underflows.
			 */
			ibs_op_ctl = min(ibs_op_ctl + IBS_RANDOM_MAXCNT_OFFSET,
					 IBS_OP_MAX_CNT);
		}
		if (ibs_caps & IBS_CAPS_OPCNT && ibs_config.dispatched_ops)
			ibs_op_ctl |= IBS_OP_CNT_CTL;
		ibs_op_ctl |= IBS_OP_ENABLE;
		val = op_amd_randomize_ibs_op(ibs_op_ctl);
		wrmsrl(MSR_AMD64_IBSOPCTL, val);
	}
}
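
/*
 * Worked example with the default max_cnt_op of 250000 (see
 * setup_ibs_files() below): 250000 >> 4 = 15625 = 0x3D09 max-count
 * units. With a writable IbsOpCurCnt this becomes 0x3D09 + 128 =
 * 0x3D89 to compensate the randomized start count; without it,
 * 0x3D09 already lies within [0x0081, 0xFF80], so the clamp leaves
 * it unchanged.
 */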

static void op_amd_stop_ibs(void)
{
	if (!ibs_caps)
		return;

	if (ibs_config.fetch_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSFETCHCTL, 0);

	if (ibs_config.op_enabled)
		/* clear max count and enable */
		wrmsrl(MSR_AMD64_IBSOPCTL, 0);
}

#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void op_mux_switch_ctrl(struct op_x86_model_spec const *model,
			       struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}
}

#endif

/* functions for op_amd_spec */

static void op_amd_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!msrs->counters[i].addr)
			continue;
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}

static int op_amd_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			goto fail;
		if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i)) {
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
			goto fail;
		}
		/* both registers must be reserved */
		msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		continue;
	fail:
		if (!counter_config[i].enabled)
			continue;
		op_x86_warn_reserved(i);
		op_amd_shutdown(msrs);
		return -EBUSY;
	}

	return 0;
}

static void op_amd_setup_ctrs(struct op_x86_model_spec const *model,
			      struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/* setup reset_value */
	for (i = 0; i < NUM_VIRT_COUNTERS; ++i) {
		if (counter_config[i].enabled
		    && msrs->counters[op_x86_virt_to_phys(i)].addr)
			reset_value[i] = counter_config[i].count;
		else
			reset_value[i] = 0;
	}

	/* clear all counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!msrs->controls[i].addr)
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
			op_x86_warn_in_use(i);
		val &= model->reserved;
		wrmsrl(msrs->controls[i].addr, val);
		/*
		 * avoid a false detection of ctr overflows in NMI
		 * handler
		 */
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;

		/* setup counter registers */
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);

		/* setup control registers */
		rdmsrl(msrs->controls[i].addr, val);
		val &= model->reserved;
		val |= op_x86_get_ctrl(model, &counter_config[virt]);
		wrmsrl(msrs->controls[i].addr, val);
	}

	if (ibs_caps)
		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_NMI, 0);
}

static void op_amd_cpu_shutdown(void)
{
	if (ibs_caps)
		setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);
}

static int op_amd_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		int virt = op_x86_phys_to_virt(i);
		if (!reset_value[virt])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		/* bit is clear if overflowed: */
		if (val & OP_CTR_OVERFLOW)
			continue;
		oprofile_add_sample(regs, virt);
		wrmsrl(msrs->counters[i].addr, -(u64)reset_value[virt]);
	}

	op_amd_handle_ibs(regs, msrs);

	/* See op_model_ppro.c */
	return 1;
}
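
/*
 * The counters count upwards, so writing -(u64)reset_value arms a
 * counter to wrap to 0 after reset_value events: e.g. reset_value =
 * 100000 writes 0xFFFFFFFFFFFE7960. Bit 31 (OP_CTR_OVERFLOW) is set
 * in any such large value and clear once the counter has wrapped,
 * hence "bit is clear if overflowed" above.
 */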

static void op_amd_start(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_start_ibs();
}

static void op_amd_stop(struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * Subtle: stop on all counters to avoid race with setting our
	 * pm callback
	 */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (!reset_value[op_x86_phys_to_virt(i)])
			continue;
		rdmsrl(msrs->controls[i].addr, val);
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(msrs->controls[i].addr, val);
	}

	op_amd_stop_ibs();
}

static int __init_ibs_nmi(void)
{
#define IBSCTL_LVTOFFSETVAL		(1 << 8)
#define IBSCTL				0x1cc
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;
	u8 ibs_eilvt_off;

	ibs_eilvt_off = setup_APIC_eilvt_ibs(0, APIC_EILVT_MSG_FIX, 1);

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVTOFFSETVAL);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVTOFFSETVAL)) {
			pci_dev_put(cpu_cfg);
			printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
			       "IBSCTL = 0x%08x", value);
			return 1;
		}
	} while (1);

	if (!nodes) {
		printk(KERN_DEBUG "No CPU node configured for IBS");
		return 1;
	}

	return 0;
}
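
/*
 * __init_ibs_nmi() programs the IBSCTL register (offset 0x1cc in each
 * northbridge's misc PCI config space) with the APIC LVT entry offset
 * that IBS interrupts should use, setting bit 8 (IBSCTL_LVTOFFSETVAL)
 * to mark the offset valid. The read-back check catches nodes where
 * the write did not take effect.
 */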

/* initialize the APIC for the IBS interrupts if available */
static void init_ibs(void)
{
	ibs_caps = get_ibs_caps();

	if (!ibs_caps)
		return;

	if (__init_ibs_nmi()) {
		ibs_caps = 0;
		return;
	}

	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
	       (unsigned)ibs_caps);
}

static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

static int setup_ibs_files(struct super_block *sb, struct dentry *root)
{
	struct dentry *dir;
	int ret = 0;

	/* architecture specific files */
	if (create_arch_files)
		ret = create_arch_files(sb, root);

	if (ret)
		return ret;

	if (!ibs_caps)
		return ret;

	/* model specific files */

	/* setup some reasonable defaults */
	ibs_config.max_cnt_fetch = 250000;
	ibs_config.fetch_enabled = 0;
	ibs_config.max_cnt_op = 250000;
	ibs_config.op_enabled = 0;
	ibs_config.dispatched_ops = 0;

	dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.fetch_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_fetch);
	oprofilefs_create_ulong(sb, dir, "rand_enable",
				&ibs_config.rand_en);

	dir = oprofilefs_mkdir(sb, root, "ibs_op");
	oprofilefs_create_ulong(sb, dir, "enable",
				&ibs_config.op_enabled);
	oprofilefs_create_ulong(sb, dir, "max_count",
				&ibs_config.max_cnt_op);
	if (ibs_caps & IBS_CAPS_OPCNT)
		oprofilefs_create_ulong(sb, dir, "dispatched_ops",
					&ibs_config.dispatched_ops);

	return 0;
}
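
/*
 * The files created above appear under the oprofilefs mount point
 * (typically /dev/oprofile) as:
 *
 *   ibs_fetch/{enable,max_count,rand_enable}
 *   ibs_op/{enable,max_count[,dispatched_ops]}
 *
 * e.g. "echo 1 > /dev/oprofile/ibs_op/enable" (mount point assumed)
 * enables IBS op sampling for the next profiling run.
 */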

static int op_amd_init(struct oprofile_operations *ops)
{
	init_ibs();
	create_arch_files = ops->create_files;
	ops->create_files = setup_ibs_files;
	return 0;
}

struct op_x86_model_spec op_amd_spec = {
	.num_counters		= NUM_COUNTERS,
	.num_controls		= NUM_COUNTERS,
	.num_virt_counters	= NUM_VIRT_COUNTERS,
	.reserved		= MSR_AMD_EVENTSEL_RESERVED,
	.event_mask		= OP_EVENT_MASK,
	.init			= op_amd_init,
	.fill_in_addresses	= &op_amd_fill_in_addresses,
	.setup_ctrs		= &op_amd_setup_ctrs,
	.cpu_down		= &op_amd_cpu_shutdown,
	.check_ctrs		= &op_amd_check_ctrs,
	.start			= &op_amd_start,
	.stop			= &op_amd_stop,
	.shutdown		= &op_amd_shutdown,
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
	.switch_ctrl		= &op_mux_switch_ctrl,
#endif
};