/*
 * Performance event support - PPC 8xx
 *
 * Copyright 2016 Christophe Leroy, CS Systemes d'Information
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>

#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
#define PERF_8xx_ID_CPU_CYCLES		1
#define PERF_8xx_ID_HW_INSTRUCTIONS	2
#define PERF_8xx_ID_ITLB_LOAD_MISS	3
#define PERF_8xx_ID_DTLB_LOAD_MISS	4

#define C(x)	PERF_COUNT_HW_CACHE_##x
#define DTLB_LOAD_MISS	(C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
#define ITLB_LOAD_MISS	(C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
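/*
 * The *_LOAD_MISS values above follow the perf ABI encoding for
 * PERF_TYPE_HW_CACHE events: cache id in bits 0-7, operation id in
 * bits 8-15 and result id in bits 16-23. This is the value the perf
 * core passes in attr.config and that event_type() compares against.
 */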
extern unsigned long itlb_miss_counter, dtlb_miss_counter;
extern atomic_t instruction_counter;

static atomic_t insn_ctr_ref;
static atomic_t itlb_miss_ref;
static atomic_t dtlb_miss_ref;
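/*
 * itlb_miss_counter, dtlb_miss_counter and instruction_counter are
 * maintained outside this file, by the low-level 8xx exception code
 * (that is the assumption behind the patch__* sites used below). The
 * *_ref atomics count how many events currently use each counter so
 * the counting machinery is only active while somebody needs it.
 *
 * get_insn_ctr() rebuilds the 48-bit instruction count from the
 * software-maintained upper part and the top 16 bits of the COUNTA
 * SPR, retrying until both halves are consistent.
 */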
static s64 get_insn_ctr(void)
{
	int ctr;
	unsigned long counta;

	do {
		ctr = atomic_read(&instruction_counter);
		counta = mfspr(SPRN_COUNTA);
	} while (ctr != atomic_read(&instruction_counter));

	return ((s64)ctr << 16) | (counta >> 16);
}
static int event_type(struct perf_event *event)
{
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES)
			return PERF_8xx_ID_CPU_CYCLES;
		if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS)
			return PERF_8xx_ID_HW_INSTRUCTIONS;
		return -EOPNOTSUPP;
	case PERF_TYPE_HW_CACHE:
		if (event->attr.config == ITLB_LOAD_MISS)
			return PERF_8xx_ID_ITLB_LOAD_MISS;
		if (event->attr.config == DTLB_LOAD_MISS)
			return PERF_8xx_ID_DTLB_LOAD_MISS;
		return -EOPNOTSUPP;
	case PERF_TYPE_RAW:
		break;
	default:
		return -ENOENT;
	}
	return -EOPNOTSUPP;
}
static int mpc8xx_pmu_event_init(struct perf_event *event)
{
	int type = event_type(event);

	if (type < 0)
		return type;

	return 0;
}
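/*
 * add() is where counting actually gets switched on: the first user of
 * the instruction counter programs ICTRL, and the first user of a TLB
 * miss counter patches branches to the counting code into the TLB miss
 * handler exits. The current counter value is snapshotted into
 * hw.prev_count so read() can later compute a delta.
 */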
static int mpc8xx_pmu_add(struct perf_event *event, int flags)
{
	int type = event_type(event);
	s64 val = 0;

	if (type < 0)
		return type;

	switch (type) {
	case PERF_8xx_ID_CPU_CYCLES:
		val = get_tb();
		break;
	case PERF_8xx_ID_HW_INSTRUCTIONS:
		if (atomic_inc_return(&insn_ctr_ref) == 1)
			mtspr(SPRN_ICTRL, 0xc0080007);
		val = get_insn_ctr();
		break;
	case PERF_8xx_ID_ITLB_LOAD_MISS:
		if (atomic_inc_return(&itlb_miss_ref) == 1) {
			unsigned long target = patch_site_addr(&patch__itlbmiss_perf);

			patch_branch_site(&patch__itlbmiss_exit_1, target, 0);
#ifndef CONFIG_PIN_TLB_TEXT
			patch_branch_site(&patch__itlbmiss_exit_2, target, 0);
#endif
		}
		val = itlb_miss_counter;
		break;
	case PERF_8xx_ID_DTLB_LOAD_MISS:
		if (atomic_inc_return(&dtlb_miss_ref) == 1) {
			unsigned long target = patch_site_addr(&patch__dtlbmiss_perf);

			patch_branch_site(&patch__dtlbmiss_exit_1, target, 0);
			patch_branch_site(&patch__dtlbmiss_exit_2, target, 0);
			patch_branch_site(&patch__dtlbmiss_exit_3, target, 0);
		}
		val = dtlb_miss_counter;
		break;
	}
	local64_set(&event->hw.prev_count, val);
	return 0;
}
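/*
 * read() folds the difference since the last snapshot into the event
 * count. The cmpxchg retry keeps prev_count and the delta consistent if
 * several readers race. Cycles are derived from the timebase; the factor
 * of 16 presumably reflects the timebase ticking once every 16 core
 * clocks on these parts. The instruction counter wraps at 2^48 and the
 * TLB miss counters at 2^32, hence the different wrap handling.
 */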
static void mpc8xx_pmu_read(struct perf_event *event)
{
	int type = event_type(event);
	s64 prev, val = 0, delta = 0;

	if (type < 0)
		return;

	do {
		prev = local64_read(&event->hw.prev_count);
		switch (type) {
		case PERF_8xx_ID_CPU_CYCLES:
			val = get_tb();
			delta = 16 * (val - prev);
			break;
		case PERF_8xx_ID_HW_INSTRUCTIONS:
			val = get_insn_ctr();
			delta = val - prev;
			if (delta < 0)
				delta += 0x1000000000000LL;
			break;
		case PERF_8xx_ID_ITLB_LOAD_MISS:
			val = itlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		case PERF_8xx_ID_DTLB_LOAD_MISS:
			val = dtlb_miss_counter;
			delta = (s64)((s32)val - (s32)prev);
			break;
		}
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
}
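/*
 * del() takes a final reading, then, when the last user of a counter
 * goes away, undoes what add() set up: the instruction counting hardware
 * is turned back off via ICTRL, and the patched branches in the TLB miss
 * handlers are overwritten with the original
 * "mfspr r10, SPRN_SPRG_SCRATCH0" instruction rebuilt below.
 */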
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
	/* mfspr r10, SPRN_SPRG_SCRATCH0 */
	unsigned int insn = PPC_INST_MFSPR | __PPC_RS(R10) |
			    __PPC_SPR(SPRN_SPRG_SCRATCH0);

	mpc8xx_pmu_read(event);

	/* If it was the last user, stop counting to avoid useless overhead */
	switch (event_type(event)) {
	case PERF_8xx_ID_CPU_CYCLES:
		break;
	case PERF_8xx_ID_HW_INSTRUCTIONS:
		if (atomic_dec_return(&insn_ctr_ref) == 0)
			mtspr(SPRN_ICTRL, 7);
		break;
	case PERF_8xx_ID_ITLB_LOAD_MISS:
		if (atomic_dec_return(&itlb_miss_ref) == 0) {
			patch_instruction_site(&patch__itlbmiss_exit_1, insn);
#ifndef CONFIG_PIN_TLB_TEXT
			patch_instruction_site(&patch__itlbmiss_exit_2, insn);
#endif
		}
		break;
	case PERF_8xx_ID_DTLB_LOAD_MISS:
		if (atomic_dec_return(&dtlb_miss_ref) == 0) {
			patch_instruction_site(&patch__dtlbmiss_exit_1, insn);
			patch_instruction_site(&patch__dtlbmiss_exit_2, insn);
			patch_instruction_site(&patch__dtlbmiss_exit_3, insn);
		}
		break;
	}
}
static struct pmu mpc8xx_pmu = {
	.event_init	= mpc8xx_pmu_event_init,
	.add		= mpc8xx_pmu_add,
	.del		= mpc8xx_pmu_del,
	.read		= mpc8xx_pmu_read,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT |
			  PERF_PMU_CAP_NO_NMI,
};
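/*
 * Register the PMU at early init. ICTRL is set to the same "stopped"
 * value that mpc8xx_pmu_del() restores, so instruction counting stays
 * off until the first event is added; COUNTA is given a starting value.
 */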
static int init_mpc8xx_pmu(void)
{
	mtspr(SPRN_ICTRL, 7);
	mtspr(SPRN_COUNTA, 0xffff);

	return perf_pmu_register(&mpc8xx_pmu, "cpu", PERF_TYPE_RAW);
}

early_initcall(init_mpc8xx_pmu);
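/*
 * Example usage from userspace with the perf tool (the generic event
 * names below map onto the four IDs handled above; exact tool syntax
 * may vary):
 *
 *   perf stat -e cycles,instructions,iTLB-load-misses,dTLB-load-misses sleep 1
 */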