#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
  [PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}
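/*
 * Illustration only (these helpers are not part of the original
 * driver): each table entry packs the P6 EVNTSEL event code in the
 * low byte and the unit mask in the second byte.
 */
static inline u8 __maybe_unused p6_event_code(u64 config)
{
	return config & 0xff;		/* e.g. 0x2e = L2_RQSTS */
}

static inline u8 __maybe_unused p6_event_umask(u64 config)
{
	return (config >> 8) & 0xff;	/* e.g. 0x0f = all MESI states */
}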
/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL
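/*
 * Sketch of the resulting encodings (derived from the definitions in
 * this file; ARCH_PERFMON_EVENTSEL_ENABLE is bit 22, 0x00400000):
 *
 *	P6_NOP_EVENT                                = 0x0000002e
 *	P6_NOP_EVENT | ARCH_PERFMON_EVENTSEL_ENABLE = 0x0040002e
 *
 * The second form is what p6_pmu_disable_event() below programs while
 * the PMU as a whole stays enabled: the counter keeps its (shared)
 * enable bit but counts nothing.
 */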
static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}
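/*
 * A minimal sanity sketch (hypothetical, not in the original driver):
 * the filter keeps the event, unit-mask, edge, invert and counter-mask
 * fields but drops everything else, notably the enable bit (bit 22),
 * so a raw config from userspace cannot force a counter on.
 */
static void __maybe_unused p6_raw_event_example(void)
{
	/* 0x0040412e = event 0x2e, umask 0x41, EVNTSEL.EN set */
	WARN_ON(p6_pmu_raw_event(0x0040412eULL) != 0x0000412eULL);
}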
static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};
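/*
 * Reading the table above (descriptive note, not original code): the
 * second argument of INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * counters the event may be scheduled on, so the FP events are pinned
 * to counter 0 (mask 0x1) while MUL and DIV must run on counter 1
 * (mask 0x2).
 */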
static void p6_pmu_disable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}
static void p6_pmu_enable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}
static void
p6_pmu_disable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	u64 val = P6_NOP_EVENT;

	/* keep the shared enable bit set while this counter idles */
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}
static void p6_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
}
static __initconst struct x86_pmu p6_pmu = {
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.eventsel		= MSR_P6_EVNTSEL0,
	.perfctr		= MSR_P6_PERFCTR0,
	.event_map		= p6_pmu_event_map,
	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.max_period		= (1ULL << 31) - 1,
	/*
	 * Events have 40 bits implemented. However, they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such, the
	 * effective width of an event for P6-like PMUs is 32 bits only
	 * (see the sketch after this structure).
	 *
	 * See the IA-32 Intel Architecture Software Developer's Manual,
	 * Vol. 3B.
	 */
	.event_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,
};
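/*
 * Worked sketch for the 32-bit effective width noted above (a
 * hypothetical helper, not driver code): because bits 39:32 merely
 * mirror bit 31, only the low 32 bits carry information and deltas
 * must be taken modulo 2^32, which is exactly what .event_mask
 * expresses.
 */
static u64 __maybe_unused p6_count_delta(u64 prev, u64 now)
{
	/* e.g. prev = 0xfffffff0, now = 0x00000010 -> delta = 0x20 */
	return (now - prev) & ((1ULL << 32) - 1);
}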
static __init int p6_pmu_init(void)
{
	switch (boot_cpu_data.x86_model) {
	case 3:			/* Pentium Pro */
	case 6:			/* Pentium II */
	case 11:		/* Pentium III */
		break;
	default:
		pr_cont("unsupported p6 CPU model %d ",
			boot_cpu_data.x86_model);
		return -ENODEV;
	}

	x86_pmu = p6_pmu;

	return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */