arch/powerpc/oprofile/op_model_pa6t.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Author: Shashi Rao, PA Semi
 *
 * Maintained by: Olof Johansson <olof@lixom.net>
 *
 * Based on arch/powerpc/oprofile/op_model_power4.c
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/oprofile_impl.h>
#include <asm/reg.h>

static unsigned char oprofile_running;

/* mmcr values are set in pa6t_reg_setup, used in pa6t_cpu_setup */
static u64 mmcr0_val;
static u64 mmcr1_val;

/* inited in pa6t_reg_setup */
static u64 reset_value[OP_MAX_COUNTER];
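
/*
 * Sampling scheme: each enabled 40-bit PMC is preloaded with
 * 2^39 - count, so bit 39 becomes set after roughly "count" events.
 * The resulting performance monitor interrupt is serviced by
 * pa6t_handle_interrupt(), which records a sample and re-arms the
 * counter with the same reset value.
 */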

static inline u64 ctr_read(unsigned int i)
{
	switch (i) {
	case 0:
		return mfspr(SPRN_PA6T_PMC0);
	case 1:
		return mfspr(SPRN_PA6T_PMC1);
	case 2:
		return mfspr(SPRN_PA6T_PMC2);
	case 3:
		return mfspr(SPRN_PA6T_PMC3);
	case 4:
		return mfspr(SPRN_PA6T_PMC4);
	case 5:
		return mfspr(SPRN_PA6T_PMC5);
	default:
		printk(KERN_ERR "ctr_read called with bad arg %u\n", i);
		return 0;
	}
}

static inline void ctr_write(unsigned int i, u64 val)
{
	switch (i) {
	case 0:
		mtspr(SPRN_PA6T_PMC0, val);
		break;
	case 1:
		mtspr(SPRN_PA6T_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PA6T_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PA6T_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PA6T_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PA6T_PMC5, val);
		break;
	default:
		printk(KERN_ERR "ctr_write called with bad arg %u\n", i);
		break;
	}
}
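
/*
 * pa6t_reg_setup() runs once to precompute the global mmcr and reset
 * values below; pa6t_cpu_setup() and pa6t_start() are then invoked on
 * each CPU by the common powerpc oprofile code.
 */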

/* precompute the values to stuff in the hardware registers */
static int pa6t_reg_setup(struct op_counter_config *ctr,
			   struct op_system_config *sys,
			   int num_ctrs)
{
	int pmc;

	/*
	 * adjust the mmcr0.en[0-5] and mmcr0.inten[0-5] values obtained from the
	 * event_mappings file by turning off the counters that the user doesn't
	 * care about
	 *
	 * setup user and kernel profiling
	 */
	for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++)
		if (!ctr[pmc].enabled) {
			sys->mmcr0 &= ~(0x1UL << pmc);
			sys->mmcr0 &= ~(0x1UL << (pmc+12));
			pr_debug("turned off counter %u\n", pmc);
		}

	if (sys->enable_kernel)
		sys->mmcr0 |= PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN;
	else
		sys->mmcr0 &= ~(PA6T_MMCR0_SUPEN | PA6T_MMCR0_HYPEN);

	if (sys->enable_user)
		sys->mmcr0 |= PA6T_MMCR0_PREN;
	else
		sys->mmcr0 &= ~PA6T_MMCR0_PREN;

	/*
	 * The performance counter event settings are given in the mmcr0 and
	 * mmcr1 values passed from the user in the op_system_config
	 * structure (sys variable).
	 */
	mmcr0_val = sys->mmcr0;
	mmcr1_val = sys->mmcr1;
	pr_debug("mmcr0_val inited to %016lx\n", sys->mmcr0);
	pr_debug("mmcr1_val inited to %016lx\n", sys->mmcr1);

	for (pmc = 0; pmc < cur_cpu_spec->num_pmcs; pmc++) {
		/* counters are 40 bit. Move to cputable at some point? */
		reset_value[pmc] = (0x1UL << 39) - ctr[pmc].count;
		pr_debug("reset_value for pmc%u inited to 0x%llx\n",
				pmc, reset_value[pmc]);
	}

	return 0;
}

/* configure registers on this cpu */
static int pa6t_cpu_setup(struct op_counter_config *ctr)
{
	u64 mmcr0 = mmcr0_val;
	u64 mmcr1 = mmcr1_val;

	/* Default is all PMCs off */
	mmcr0 &= ~(0x3FUL);
	mtspr(SPRN_PA6T_MMCR0, mmcr0);

	/* program selected programmable events in */
	mtspr(SPRN_PA6T_MMCR1, mmcr1);

	pr_debug("setup on cpu %d, mmcr0 %016lx\n", smp_processor_id(),
		mfspr(SPRN_PA6T_MMCR0));
	pr_debug("setup on cpu %d, mmcr1 %016lx\n", smp_processor_id(),
		mfspr(SPRN_PA6T_MMCR1));

	return 0;
}

static int pa6t_start(struct op_counter_config *ctr)
{
	int i;

	/* Hold off event counting until rfid */
	u64 mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;

	for (i = 0; i < cur_cpu_spec->num_pmcs; i++)
		if (ctr[i].enabled)
			ctr_write(i, reset_value[i]);
		else
			ctr_write(i, 0UL);

	mtspr(SPRN_PA6T_MMCR0, mmcr0);

	oprofile_running = 1;

	pr_debug("start on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);

	return 0;
}

static void pa6t_stop(void)
{
	u64 mmcr0;

	/* freeze counters */
	mmcr0 = mfspr(SPRN_PA6T_MMCR0);
	mmcr0 |= PA6T_MMCR0_FCM0;
	mtspr(SPRN_PA6T_MMCR0, mmcr0);

	oprofile_running = 0;

	pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), mmcr0);
}

/* handle the perfmon overflow vector */
static void pa6t_handle_interrupt(struct pt_regs *regs,
				  struct op_counter_config *ctr)
{
	unsigned long pc = mfspr(SPRN_PA6T_SIAR);
	int is_kernel = is_kernel_addr(pc);
	u64 val;
	int i;
	u64 mmcr0;

	/* disable perfmon counting until rfid */
	mmcr0 = mfspr(SPRN_PA6T_MMCR0);
	mtspr(SPRN_PA6T_MMCR0, mmcr0 | PA6T_MMCR0_HANDDIS);

	/* Record samples. We've got one global bit for whether a sample
	 * was taken, so add it for any counter that triggered overflow.
	 */
	for (i = 0; i < cur_cpu_spec->num_pmcs; i++) {
		val = ctr_read(i);
		if (val & (0x1UL << 39)) { /* Overflow bit set */
			if (oprofile_running && ctr[i].enabled) {
				/* only sample if SIAR was logging the sampled address */
				if (mmcr0 & PA6T_MMCR0_SIARLOG)
					oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0UL);
			}
		}
	}

	/* Restore mmcr0 to a good known value since the PMI changes it */
	mmcr0 = mmcr0_val | PA6T_MMCR0_HANDDIS;
	mtspr(SPRN_PA6T_MMCR0, mmcr0);
}
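
/*
 * Model descriptor picked up by the generic powerpc oprofile code
 * (arch/powerpc/oprofile/common.c) for PA6T processors.
 */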
struct op_powerpc_model op_model_pa6t = {
	.reg_setup		= pa6t_reg_setup,
	.cpu_setup		= pa6t_cpu_setup,
	.start			= pa6t_start,
	.stop			= pa6t_stop,
	.handle_interrupt	= pa6t_handle_interrupt,
};