/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
15 #include <linux/interrupt.h>
16 #include <linux/perf_event.h>
18 #include <asm/cputype.h>
/**
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
38 struct arm_pmu_platdata
{
39 irqreturn_t (*handle_irq
)(int irq
, void *dev
,
40 irq_handler_t pmu_handler
);
41 int (*runtime_resume
)(struct device
*dev
);
42 int (*runtime_suspend
)(struct device
*dev
);
45 #ifdef CONFIG_HW_PERF_EVENTS
/*
 * The ARMv7 CPU PMU supports up to 32 event counters.
 */
/* Upper bound on per-CPU hardware event counters tracked by this code. */
#define ARMPMU_MAX_HWEVENTS		32

/* Sentinel value: the generic event has no mapping on this PMU. */
#define HW_OP_UNSUPPORTED		0xFFFF
/* Shorthand for the generic perf cache-event enum constants. */
#define C(_x)			PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED		0xFFFF

/* Initializer marking every generic hardware event as unsupported. */
#define PERF_MAP_ALL_UNSUPPORTED					\
	[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
/*
 * Initializer marking every (cache, op, result) combination as
 * unsupported; per-CPU map tables start from this and then override
 * the entries the hardware actually implements.  The dropped closing
 * braces of the nested initializer are restored here -- without them
 * the trailing '\' continuation swallowed the following lines.
 */
#define PERF_CACHE_MAP_ALL_UNSUPPORTED					\
[0 ... C(MAX) - 1] = {							\
	[0 ... C(OP_MAX) - 1] = {					\
		[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED,	\
	},								\
}
66 /* The events for a given PMU register set. */
67 struct pmu_hw_events
{
69 * The events that are active on the PMU for the given index.
71 struct perf_event
*events
[ARMPMU_MAX_HWEVENTS
];
74 * A 1 bit for an index indicates that the counter is being used for
75 * an event. A 0 means that the counter can be used.
77 DECLARE_BITMAP(used_mask
, ARMPMU_MAX_HWEVENTS
);
80 * Hardware lock to serialize accesses to PMU registers. Needed for the
81 * read/modify/write sequences.
83 raw_spinlock_t pmu_lock
;
86 * When using percpu IRQs, we need a percpu dev_id. Place it here as we
87 * already have to allocate this struct per cpu.
89 struct arm_pmu
*percpu_pmu
;
94 cpumask_t active_irqs
;
97 irqreturn_t (*handle_irq
)(int irq_num
, void *dev
);
98 void (*enable
)(struct perf_event
*event
);
99 void (*disable
)(struct perf_event
*event
);
100 int (*get_event_idx
)(struct pmu_hw_events
*hw_events
,
101 struct perf_event
*event
);
102 void (*clear_event_idx
)(struct pmu_hw_events
*hw_events
,
103 struct perf_event
*event
);
104 int (*set_event_filter
)(struct hw_perf_event
*evt
,
105 struct perf_event_attr
*attr
);
106 u32 (*read_counter
)(struct perf_event
*event
);
107 void (*write_counter
)(struct perf_event
*event
, u32 val
);
108 void (*start
)(struct arm_pmu
*);
109 void (*stop
)(struct arm_pmu
*);
110 void (*reset
)(void *);
111 int (*request_irq
)(struct arm_pmu
*, irq_handler_t handler
);
112 void (*free_irq
)(struct arm_pmu
*);
113 int (*map_event
)(struct perf_event
*event
);
115 atomic_t active_events
;
116 struct mutex reserve_mutex
;
118 struct platform_device
*plat_device
;
119 struct pmu_hw_events __percpu
*hw_events
;
120 struct notifier_block hotplug_nb
;
123 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
/* Core armpmu API, implemented by the shared ARM perf_event code. */

extern const struct dev_pm_ops armpmu_dev_pm_ops;

int armpmu_register(struct arm_pmu *armpmu, int type);

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);
133 int armpmu_map_event(struct perf_event
*event
,
134 const unsigned (*event_map
)[PERF_COUNT_HW_MAX
],
135 const unsigned (*cache_map
)[PERF_COUNT_HW_CACHE_MAX
]
136 [PERF_COUNT_HW_CACHE_OP_MAX
]
137 [PERF_COUNT_HW_CACHE_RESULT_MAX
],
/* Table entry matching a CPUID to the PMU init routine for that core. */
struct pmu_probe_info {
	/*
	 * NOTE(review): cpuid/mask were dropped in transit; restored to
	 * match the _cpuid/_mask parameters of the PMU_PROBE() initializer.
	 */
	unsigned int cpuid;
	unsigned int mask;
	int (*init)(struct arm_pmu *);
};
/*
 * NOTE(review): the PMU_PROBE() body was dropped in transit, leaving a
 * dangling '\' continuation that swallowed the next #define; restored as
 * a designated initializer for struct pmu_probe_info.
 */
#define PMU_PROBE(_cpuid, _mask, _fn)	\
{					\
	.cpuid = (_cpuid),		\
	.mask = (_mask),		\
	.init = (_fn),			\
}

/* Match on the full CPU part number. */
#define ARM_PMU_PROBE(_cpuid, _fn) \
	PMU_PROBE(_cpuid, ARM_CPU_PART_MASK, _fn)

/* Match the implementor byte plus the XScale architecture field. */
#define ARM_PMU_XSCALE_MASK	((0xff << 24) | ARM_CPU_XSCALE_ARCH_MASK)

#define XSCALE_PMU_PROBE(_version, _fn) \
	PMU_PROBE(ARM_CPU_IMP_INTEL << 24 | _version, ARM_PMU_XSCALE_MASK, _fn)
161 #endif /* CONFIG_HW_PERF_EVENTS */
163 #endif /* __ARM_PMU_H__ */