/**
 * @file arch/alpha/oprofile/op_model_ev5.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */
#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>

#include "op_impl.h"

/* Compute all of the registers in preparation for enabling profiling.

   The 21164 (EV5) and 21164PC (PCA56) vary in the bit placement and
   meaning of the "CBOX" events.  Given that we don't care about meaning
   at this point, arrange for the difference in bit placement to be
   handled by common code.  */

static void
common_reg_setup(struct op_register_config *reg,
                 struct op_counter_config *ctr,
                 struct op_system_config *sys,
                 int cbox1_ofs, int cbox2_ofs)
{
        int i;
        /* reset accumulates per-counter fields shifted as high as bit 48,
           so it (and ctl) must be 64 bits wide to avoid truncation.  */
        unsigned long ctl, reset, need_reset;

        /* Select desired events.  The event numbers are selected such
           that they map directly into the event selection fields:

                PCSEL0:  0, 1
                PCSEL1: 24-39
                 CBOX1: 40-47
                PCSEL2: 48-63
                 CBOX2: 64-71

           There are two special cases, in that CYCLES can be measured
           on PCSEL[02], and SCACHE_WRITE can be measured on CBOX[12].
           These event numbers are canonicalized to their first appearance.  */

        ctl = 0;
        for (i = 0; i < 3; ++i) {
                unsigned long event = ctr[i].event;
                if (!ctr[i].enabled)
                        continue;

                /* Remap the duplicate events, as described above.  */
                if (i == 2) {
                        if (event == 0)
                                event = 12+48;
                        else if (event == 2+41)
                                event = 4+65;
                }
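                /* The remapping above rewrites the canonical CYCLES (0) and
                   SCACHE_WRITE (2+41) numbers into the PCSEL2/CBOX2 ranges
                   used by counter 2.  */
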
                /* Convert the event numbers into the mux_select bit mask.  */
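                /* Events 2-23 have no encoding and are ignored (the "error"
                   branch below).  CBOX1 and CBOX2 events also set the
                   PCSEL1/PCSEL2 field to all ones, which appears to route
                   the CBOX input to that counter.  */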
                if (event < 2)
                        ctl |= event << 31;
                else if (event < 24)
                        /* error */;
                else if (event < 40)
                        ctl |= (event - 24) << 4;
                else if (event < 48)
                        ctl |= (event - 40) << cbox1_ofs | 15 << 4;
                else if (event < 64)
                        ctl |= event - 48;
                else if (event < 72)
                        ctl |= (event - 64) << cbox2_ofs | 15;
        }
        reg->mux_select = ctl;

        /* Select processor mode.  */
        /* ??? Need to come up with some mechanism to trace only selected
           processes.  For now select from pal, kernel and user mode.  */
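        /* These are "do not count in this mode" bits, hence the negated
           enables: bit 9 excludes PAL mode, bit 8 kernel mode, and bit 30
           user mode.  */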
        ctl = 0;
        ctl |= !sys->enable_pal << 9;
        ctl |= !sys->enable_kernel << 8;
        ctl |= !sys->enable_user << 30;
        reg->proc_mode = ctl;

        /* Select interrupt frequencies.  Take the interrupt count selected
           by the user, and map it onto one of the possible counter widths.
           If the user value is in between, compute a value to which the
           counter is reset at each interrupt.  */
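        /* The hardware only interrupts after a fixed number of events per
           counter width: 2^8 (256) when hilo is 3, otherwise 2^16 (65536)
           for counters 0 and 1, or 2^14 (16384) for the narrower counter 2.  */
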
        ctl = reset = need_reset = 0;
        for (i = 0; i < 3; ++i) {
                unsigned long max, hilo, count = ctr[i].count;
                if (!ctr[i].enabled)
                        continue;

                if (count <= 256)
                        count = 256, hilo = 3, max = 256;
                else {
                        max = (i == 2 ? 16384 : 65536);
                        hilo = 2;
                        if (count > max)
                                count = max;
                }

                ctr[i].count = count;

                ctl |= hilo << (8 - i*2);
                reset |= (max - count) << (48 - 16*i);
                if (count != max)
                        need_reset |= 1 << i;
        }
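        /* A counter preloaded with (max - count) overflows after exactly
           count events; need_reset marks the counters that require this
           preload again after every interrupt (see ev5_reset_ctr).  */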
        reg->freq = ctl;
        reg->reset_values = reset;
        reg->need_reset = need_reset;
}

static void
ev5_reg_setup(struct op_register_config *reg,
              struct op_counter_config *ctr,
              struct op_system_config *sys)
{
        common_reg_setup(reg, ctr, sys, 19, 22);
}

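/* The 21164PC (PCA56) differs only in where the CBOX1/CBOX2 fields sit
   within mux_select: bit offsets 8 and 11 instead of 19 and 22.  */
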
static void
pca56_reg_setup(struct op_register_config *reg,
                struct op_counter_config *ctr,
                struct op_system_config *sys)
{
        common_reg_setup(reg, ctr, sys, 8, 11);
}

/* Program all of the registers in preparation for enabling profiling.  */

static void
ev5_cpu_setup (void *x)
{
        struct op_register_config *reg = x;

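        /* wrperfmon selectors as used in this file: 2 writes the event mux,
           3 the processor-mode enables, 4 the interrupt frequency and 6 the
           counter values; 5 reads the counters, and 0/1 stop and restart
           profiling (see ev5_reset_ctr below).  */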
        wrperfmon(2, reg->mux_select);
        wrperfmon(3, reg->proc_mode);
        wrperfmon(4, reg->freq);
        wrperfmon(6, reg->reset_values);
}

/* CTR is a counter for which the user has requested an interrupt count
   in between one of the widths selectable in hardware.  Reset the count
   for CTR to the value stored in REG->RESET_VALUES.

   For EV5, this means disabling profiling, reading the current values,
   masking in the value for the desired register, writing, then turning
   profiling back on.

   This can be streamlined if profiling is only enabled for user mode.
   In that case we know that the counters are not currently incrementing
   (due to being in kernel mode).  */

static void
ev5_reset_ctr(struct op_register_config *reg, unsigned long ctr)
{
        unsigned long values, mask, not_pk, reset_values;

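        /* Counter 0 occupies bits 48-63 of the value read and written via
           wrperfmon, counter 1 bits 32-47, and the 14-bit counter 2
           bits 16-29.  */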
        mask = (ctr == 0 ? 0xfffful << 48
                : ctr == 1 ? 0xfffful << 32
                : 0x3fff << 16);

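        /* Bits 9 and 8 are the PAL- and kernel-mode "do not count" bits
           written into proc_mode by common_reg_setup().  */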
        not_pk = 1 << 9 | 1 << 8;

        reset_values = reg->reset_values;

        if ((reg->proc_mode & not_pk) == not_pk) {
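                /* Neither PAL nor kernel mode is being counted, so the
                   counters are idle while we execute here in the kernel and
                   can be updated without stopping the profile.  */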
                values = wrperfmon(5, 0);
                values = (reset_values & mask) | (values & ~mask & -2);
                wrperfmon(6, values);
        } else {
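                /* PAL and/or kernel events are live: stop profiling, update
                   the counter, then re-enable the configured counters.  */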
                wrperfmon(0, -1);
                values = wrperfmon(5, 0);
                values = (reset_values & mask) | (values & ~mask & -2);
                wrperfmon(6, values);
                wrperfmon(1, reg->enable);
        }
}

static void
ev5_handle_interrupt(unsigned long which, struct pt_regs *regs,
                     struct op_counter_config *ctr)
{
        /* Record the sample.  */
        oprofile_add_sample(regs, which);
}

struct op_axp_model op_model_ev5 = {
        .reg_setup = ev5_reg_setup,
        .cpu_setup = ev5_cpu_setup,
        .reset_ctr = ev5_reset_ctr,
        .handle_interrupt = ev5_handle_interrupt,
        .cpu_type = "alpha/ev5",
        .num_counters = 3,
        .can_set_proc_mode = 1,
};

struct op_axp_model op_model_pca56 = {
        .reg_setup = pca56_reg_setup,
        .cpu_setup = ev5_cpu_setup,
        .reset_ctr = ev5_reset_ctr,
        .handle_interrupt = ev5_handle_interrupt,
        .cpu_type = "alpha/pca56",
        .num_counters = 3,
        .can_set_proc_mode = 1,
};