arch/alpha/oprofile/op_model_ev4.c (linux/fpc-iii.git)
/**
 * @file arch/alpha/oprofile/op_model_ev4.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author Richard Henderson <rth@twiddle.net>
 */

#include <linux/oprofile.h>
#include <linux/smp.h>
#include <asm/ptrace.h>

#include "op_impl.h"
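
/* The Alpha oprofile glue types used below (struct op_register_config,
   struct op_counter_config, struct op_system_config, struct op_axp_model)
   come from the local op_impl.h.  */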

/* Compute all of the registers in preparation for enabling profiling.  */

static void
ev4_reg_setup(struct op_register_config *reg,
              struct op_counter_config *ctr,
              struct op_system_config *sys)
{
        unsigned long ctl = 0, count, hilo;

        /* Select desired events.  We've mapped the event numbers
           such that they fit directly into the event selection fields.

           Note that there is no "off" setting.  In both cases we select
           the EXTERNAL event source, hoping that it'll be the lowest
           frequency, and set the frequency counter to LOW.  The interrupts
           for these "disabled" counter overflows are ignored by the
           interrupt handler.

           This is most irritating, because the hardware *can* enable and
           disable the interrupts for these counters independently, but the
           wrperfmon interface doesn't allow it.  */
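
        /* Field placement follows from the shifts below: counter 0's
           event select sits at bit 8 and counter 1's (biased down by 16)
           at bit 32, with field values 14 and 7 selecting the EXTERNAL
           "off" placeholder described above.  The hilo frequency bits
           computed further down land at bits 3 and 0.  */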
        ctl |= (ctr[0].enabled ? ctr[0].event << 8 : 14 << 8);
        ctl |= (ctr[1].enabled ? (ctr[1].event - 16) << 32 : 7ul << 32);

        /* EV4 can not read or write its counter registers.  The only
           thing one can do at all is see if you overflow and get an
           interrupt.  We can set the width of the counters, to some
           extent.  Take the interrupt count selected by the user,
           map it onto one of the possible values, and write it back.  */
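
        /* Only two interrupt periods exist per counter: counter 0 can
           overflow every 4096 or 65536 events and counter 1 every 256 or
           4096.  Requests at or below the smaller value get the smaller
           period, everything else the larger one, and the result is
           written back so the caller sees the count actually in effect.  */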
        count = ctr[0].count;
        if (count <= 4096)
                count = 4096, hilo = 1;
        else
                count = 65536, hilo = 0;
        ctr[0].count = count;
        ctl |= (ctr[0].enabled && hilo) << 3;

        count = ctr[1].count;
        if (count <= 256)
                count = 256, hilo = 1;
        else
                count = 4096, hilo = 0;
        ctr[1].count = count;
        ctl |= (ctr[1].enabled && hilo);

        reg->mux_select = ctl;

        /* Select performance monitoring options.  */
        /* ??? Need to come up with some mechanism to trace only
           selected processes.  EV4 does not have a mechanism to
           select kernel or user mode only.  For now, enable always.  */
        reg->proc_mode = 0;

        /* Frequency is folded into mux_select for EV4.  */
        reg->freq = 0;

        /* See above regarding no writes.  */
        reg->reset_values = 0;
        reg->need_reset = 0;
}

/* Program all of the registers in preparation for enabling profiling.  */

static void
ev4_cpu_setup(void *x)
{
        struct op_register_config *reg = x;
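
        /* wrperfmon is the PALcode performance-monitor call; as used in
           this file, function code 2 loads the event/frequency mux word
           computed in ev4_reg_setup and function code 3 loads the
           process-mode word (always 0 here, since EV4 cannot restrict
           counting to kernel or user mode).  */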
        wrperfmon(2, reg->mux_select);
        wrperfmon(3, reg->proc_mode);
}

static void
ev4_handle_interrupt(unsigned long which, struct pt_regs *regs,
                     struct op_counter_config *ctr)
{
        /* EV4 can't properly disable counters individually.
           Discard "disabled" events now.  */
        if (!ctr[which].enabled)
                return;

        /* Record the sample.  */
        oprofile_add_sample(regs, which);
}
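
/* Model descriptor picked up by the shared Alpha oprofile setup code.
   reset_ctr is NULL and can_set_proc_mode is 0 because, as noted above,
   EV4 can neither write its counter registers nor restrict counting to
   kernel or user mode.  */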
struct op_axp_model op_model_ev4 = {
        .reg_setup = ev4_reg_setup,
        .cpu_setup = ev4_cpu_setup,
        .reset_ctr = NULL,
        .handle_interrupt = ev4_handle_interrupt,
        .cpu_type = "alpha/ev4",
        .num_counters = 2,
        .can_set_proc_mode = 0,
};