/*
 * Performance event support - PPC 8xx
 *
 * Copyright 2016 Christophe Leroy, CS Systemes d'Information
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
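/*
 * The 8xx has no hardware PMU; the events below are emulated from the
 * timebase, the COUNTA counter and software counters maintained by the
 * low-level exception code.  A typical counting session from the perf
 * tool (assuming standard event names) would be:
 *
 *   perf stat -e cycles,instructions,iTLB-load-misses,dTLB-load-misses <cmd>
 */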
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#define PERF_8xx_ID_CPU_CYCLES		1
#define PERF_8xx_ID_HW_INSTRUCTIONS	2
#define PERF_8xx_ID_ITLB_LOAD_MISS	3
#define PERF_8xx_ID_DTLB_LOAD_MISS	4
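/*
 * Generic cache events are encoded as (cache id) | (operation << 8) |
 * (result << 16), as defined by the perf_event ABI.  Only the read-miss
 * combinations for the ITLB and DTLB are handled here.
 */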
#define C(x)	PERF_COUNT_HW_CACHE_##x
#define DTLB_LOAD_MISS	(C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
#define ITLB_LOAD_MISS	(C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
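/*
 * These counters are not updated in this file: they are maintained by
 * the low-level exception code (TLB miss handlers and the instruction
 * counter exception); this file only reads them.
 */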
extern unsigned long itlb_miss_counter, dtlb_miss_counter;
extern atomic_t instruction_counter;
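/* Number of events currently counting instructions */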
static atomic_t insn_ctr_ref;
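/*
 * The instruction count is split in two: instruction_counter holds the
 * upper 32 bits, while the low 16 bits live in the upper half of the
 * COUNTA SPR.  Re-read instruction_counter until it is stable so that
 * both halves are consistent with each other.
 */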
static s64 get_insn_ctr(void)
{
	int ctr;
	unsigned long counta;

	do {
		ctr = atomic_read(&instruction_counter);
		counta = mfspr(SPRN_COUNTA);
	} while (ctr != atomic_read(&instruction_counter));

	return ((s64)ctr << 16) | (counta >> 16);
}
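/*
 * Map a generic perf event onto one of the pseudo-counters implemented
 * here, or return an error if the event is not supported.
 */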
static int event_type(struct perf_event *event)
{
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES)
			return PERF_8xx_ID_CPU_CYCLES;
		if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS)
			return PERF_8xx_ID_HW_INSTRUCTIONS;
		break;
	case PERF_TYPE_HW_CACHE:
		if (event->attr.config == ITLB_LOAD_MISS)
			return PERF_8xx_ID_ITLB_LOAD_MISS;
		if (event->attr.config == DTLB_LOAD_MISS)
			return PERF_8xx_ID_DTLB_LOAD_MISS;
		break;
	case PERF_TYPE_RAW:
		break;
	default:
		return -ENOENT;
	}
	return -EOPNOTSUPP;
}
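/* Only validate the event here; counting is set up when the event is added */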
static int mpc8xx_pmu_event_init(struct perf_event *event)
{
	int type = event_type(event);

	if (type < 0)
		return type;

	return 0;
}
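/*
 * The counters are free running, so adding an event only takes a
 * snapshot of the current counter value as reference for later delta
 * computations.  Instruction counting is enabled in hardware when the
 * first instruction event shows up.
 */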
static int mpc8xx_pmu_add(struct perf_event *event, int flags)
{
	int type = event_type(event);
	s64 val = 0;

	if (type < 0)
		return type;

	switch (type) {
	case PERF_8xx_ID_CPU_CYCLES:
		val = get_tb();
		break;
	case PERF_8xx_ID_HW_INSTRUCTIONS:
		if (atomic_inc_return(&insn_ctr_ref) == 1)
			mtspr(SPRN_ICTRL, 0xc0080007);
		val = get_insn_ctr();
		break;
	case PERF_8xx_ID_ITLB_LOAD_MISS:
		val = itlb_miss_counter;
		break;
	case PERF_8xx_ID_DTLB_LOAD_MISS:
		val = dtlb_miss_counter;
		break;
	}
	local64_set(&event->hw.prev_count, val);

	return 0;
}
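/*
 * Fold the delta since the previous snapshot into the event count.
 * CPU cycles are derived from the timebase, which ticks at 1/16 of the
 * core clock on the 8xx, hence the multiplication by 16.  The
 * instruction counter is 48 bits wide and the TLB miss counters are
 * 32 bits wide, so wrap-around is handled accordingly.  The cmpxchg
 * loop copes with concurrent readers.
 */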
static void mpc8xx_pmu_read(struct perf_event *event)
{
	int type = event_type(event);
	s64 prev, val = 0, delta = 0;

	if (type < 0)
		return;

	do {
		prev = local64_read(&event->hw.prev_count);
		switch (type) {
		case PERF_8xx_ID_CPU_CYCLES:
			val = get_tb();
			delta = 16 * (val - prev);
			break;
		case PERF_8xx_ID_HW_INSTRUCTIONS:
			val = get_insn_ctr();
			delta = val - prev;
			if (delta < 0)
				delta += 0x1000000000000LL;
			break;
		case PERF_8xx_ID_ITLB_LOAD_MISS:
			val = itlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		case PERF_8xx_ID_DTLB_LOAD_MISS:
			val = dtlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
}
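/*
 * Removing an event folds the final delta into the event count.  When
 * the last instruction event goes away, instruction counting is turned
 * off again.
 */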
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
	mpc8xx_pmu_read(event);
	if (event_type(event) != PERF_8xx_ID_HW_INSTRUCTIONS)
		return;

	/* If it was the last user, stop counting to avoid useless overhead */
	if (atomic_dec_return(&insn_ctr_ref) == 0)
		mtspr(SPRN_ICTRL, 7);
}
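/*
 * None of these counters can raise an interrupt on overflow, so only
 * counting (e.g. perf stat) is possible, not sampling.
 */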
static struct pmu mpc8xx_pmu = {
	.event_init	= mpc8xx_pmu_event_init,
	.add		= mpc8xx_pmu_add,
	.del		= mpc8xx_pmu_del,
	.read		= mpc8xx_pmu_read,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT |
			  PERF_PMU_CAP_NO_NMI,
};
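/*
 * Leave instruction counting disabled at boot and arm the COUNTA
 * counter before registering the PMU.
 */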
static int init_mpc8xx_pmu(void)
{
	mtspr(SPRN_ICTRL, 7);
	mtspr(SPRN_CMPA, 0);
	mtspr(SPRN_COUNTA, 0xffff);

	return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW);
}

early_initcall(init_mpc8xx_pmu);