/*
 * Performance event support - PPC 8xx
 *
 * Copyright 2016 Christophe Leroy, CS Systemes d'Information
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
14 #include <linux/perf_event.h>
15 #include <linux/percpu.h>
16 #include <linux/hardirq.h>
18 #include <asm/machdep.h>
19 #include <asm/firmware.h>
20 #include <asm/ptrace.h>
21 #include <asm/code-patching.h>
23 #define PERF_8xx_ID_CPU_CYCLES 1
24 #define PERF_8xx_ID_HW_INSTRUCTIONS 2
25 #define PERF_8xx_ID_ITLB_LOAD_MISS 3
26 #define PERF_8xx_ID_DTLB_LOAD_MISS 4
28 #define C(x) PERF_COUNT_HW_CACHE_##x
29 #define DTLB_LOAD_MISS (C(DTLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
30 #define ITLB_LOAD_MISS (C(ITLB) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16))
32 extern unsigned long itlb_miss_counter
, dtlb_miss_counter
;
33 extern atomic_t instruction_counter
;
34 extern unsigned int itlb_miss_perf
, dtlb_miss_perf
;
35 extern unsigned int itlb_miss_exit_1
, itlb_miss_exit_2
;
36 extern unsigned int dtlb_miss_exit_1
, dtlb_miss_exit_2
, dtlb_miss_exit_3
;
38 static atomic_t insn_ctr_ref
;
39 static atomic_t itlb_miss_ref
;
40 static atomic_t dtlb_miss_ref
;
42 static s64
get_insn_ctr(void)
48 ctr
= atomic_read(&instruction_counter
);
49 counta
= mfspr(SPRN_COUNTA
);
50 } while (ctr
!= atomic_read(&instruction_counter
));
52 return ((s64
)ctr
<< 16) | (counta
>> 16);
55 static int event_type(struct perf_event
*event
)
57 switch (event
->attr
.type
) {
58 case PERF_TYPE_HARDWARE
:
59 if (event
->attr
.config
== PERF_COUNT_HW_CPU_CYCLES
)
60 return PERF_8xx_ID_CPU_CYCLES
;
61 if (event
->attr
.config
== PERF_COUNT_HW_INSTRUCTIONS
)
62 return PERF_8xx_ID_HW_INSTRUCTIONS
;
64 case PERF_TYPE_HW_CACHE
:
65 if (event
->attr
.config
== ITLB_LOAD_MISS
)
66 return PERF_8xx_ID_ITLB_LOAD_MISS
;
67 if (event
->attr
.config
== DTLB_LOAD_MISS
)
68 return PERF_8xx_ID_DTLB_LOAD_MISS
;
/*
 * pmu::event_init callback: accept the event iff it maps to one of the
 * four supported event types; propagate the errno from event_type()
 * otherwise.
 */
static int mpc8xx_pmu_event_init(struct perf_event *event)
{
	int type = event_type(event);

	if (type < 0)
		return type;

	return 0;
}
87 static int mpc8xx_pmu_add(struct perf_event
*event
, int flags
)
89 int type
= event_type(event
);
96 case PERF_8xx_ID_CPU_CYCLES
:
99 case PERF_8xx_ID_HW_INSTRUCTIONS
:
100 if (atomic_inc_return(&insn_ctr_ref
) == 1)
101 mtspr(SPRN_ICTRL
, 0xc0080007);
102 val
= get_insn_ctr();
104 case PERF_8xx_ID_ITLB_LOAD_MISS
:
105 if (atomic_inc_return(&itlb_miss_ref
) == 1) {
106 unsigned long target
= (unsigned long)&itlb_miss_perf
;
108 patch_branch(&itlb_miss_exit_1
, target
, 0);
109 #ifndef CONFIG_PIN_TLB_TEXT
110 patch_branch(&itlb_miss_exit_2
, target
, 0);
113 val
= itlb_miss_counter
;
115 case PERF_8xx_ID_DTLB_LOAD_MISS
:
116 if (atomic_inc_return(&dtlb_miss_ref
) == 1) {
117 unsigned long target
= (unsigned long)&dtlb_miss_perf
;
119 patch_branch(&dtlb_miss_exit_1
, target
, 0);
120 patch_branch(&dtlb_miss_exit_2
, target
, 0);
121 patch_branch(&dtlb_miss_exit_3
, target
, 0);
123 val
= dtlb_miss_counter
;
126 local64_set(&event
->hw
.prev_count
, val
);
130 static void mpc8xx_pmu_read(struct perf_event
*event
)
132 int type
= event_type(event
);
133 s64 prev
, val
= 0, delta
= 0;
139 prev
= local64_read(&event
->hw
.prev_count
);
141 case PERF_8xx_ID_CPU_CYCLES
:
143 delta
= 16 * (val
- prev
);
145 case PERF_8xx_ID_HW_INSTRUCTIONS
:
146 val
= get_insn_ctr();
149 delta
+= 0x1000000000000LL
;
151 case PERF_8xx_ID_ITLB_LOAD_MISS
:
152 val
= itlb_miss_counter
;
153 delta
= (s64
)((s32
)val
- (s32
)prev
);
155 case PERF_8xx_ID_DTLB_LOAD_MISS
:
156 val
= dtlb_miss_counter
;
157 delta
= (s64
)((s32
)val
- (s32
)prev
);
160 } while (local64_cmpxchg(&event
->hw
.prev_count
, prev
, val
) != prev
);
162 local64_add(delta
, &event
->count
);
165 static void mpc8xx_pmu_del(struct perf_event
*event
, int flags
)
167 /* mfspr r10, SPRN_SPRG_SCRATCH0 */
168 unsigned int insn
= PPC_INST_MFSPR
| __PPC_RS(R10
) |
169 __PPC_SPR(SPRN_SPRG_SCRATCH0
);
171 mpc8xx_pmu_read(event
);
173 /* If it was the last user, stop counting to avoid useles overhead */
174 switch (event_type(event
)) {
175 case PERF_8xx_ID_CPU_CYCLES
:
177 case PERF_8xx_ID_HW_INSTRUCTIONS
:
178 if (atomic_dec_return(&insn_ctr_ref
) == 0)
179 mtspr(SPRN_ICTRL
, 7);
181 case PERF_8xx_ID_ITLB_LOAD_MISS
:
182 if (atomic_dec_return(&itlb_miss_ref
) == 0) {
183 patch_instruction(&itlb_miss_exit_1
, insn
);
184 #ifndef CONFIG_PIN_TLB_TEXT
185 patch_instruction(&itlb_miss_exit_2
, insn
);
189 case PERF_8xx_ID_DTLB_LOAD_MISS
:
190 if (atomic_dec_return(&dtlb_miss_ref
) == 0) {
191 patch_instruction(&dtlb_miss_exit_1
, insn
);
192 patch_instruction(&dtlb_miss_exit_2
, insn
);
193 patch_instruction(&dtlb_miss_exit_3
, insn
);
199 static struct pmu mpc8xx_pmu
= {
200 .event_init
= mpc8xx_pmu_event_init
,
201 .add
= mpc8xx_pmu_add
,
202 .del
= mpc8xx_pmu_del
,
203 .read
= mpc8xx_pmu_read
,
204 .capabilities
= PERF_PMU_CAP_NO_INTERRUPT
|
208 static int init_mpc8xx_pmu(void)
210 mtspr(SPRN_ICTRL
, 7);
212 mtspr(SPRN_COUNTA
, 0xffff);
214 return perf_pmu_register(&mpc8xx_pmu
, "cpu", PERF_TYPE_RAW
);
217 early_initcall(init_mpc8xx_pmu
);