#include "evlist.h"
#include "evsel.h"
#include "cpumap.h"
#include "parse-events.h"
#include <errno.h>
#include <api/fs/fs.h>
#include "util.h"
#include "cloexec.h"

typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);

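/*
 * Open a throwaway event parsed from 'str' on one CPU, let 'fn' tweak its
 * attributes, then re-open it to see whether the kernel accepts the modified
 * attributes.  Returns 0 on success, -EAGAIN if the probe event itself could
 * not be opened, or another negative error code.
 */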
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}

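/*
 * Probe whether the running kernel honours the attribute tweak applied by
 * 'fn', falling back to the next candidate event whenever the current one
 * cannot be opened at all (-EAGAIN).
 */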
static bool perf_probe_api(setup_probe_fn_t fn)
{
	const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
	struct cpu_map *cpus;
	int cpu, ret, i = 0;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	do {
		ret = perf_do_probe_api(fn, cpu, try[i++]);
		if (!ret)
			return true;
	} while (ret == -EAGAIN && try[i]);

	return false;
}

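/*
 * Each probe callback below flips a single perf_event_attr field; passing it
 * to perf_probe_api() answers whether the kernel accepts that field.
 */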
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}

static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}

static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}

bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}

static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}

bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}

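/*
 * Check whether a system-wide (per-CPU, pid == -1) event can be opened at
 * all, using a software cpu-clock event as the probe.
 */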
bool perf_can_record_cpu_wide(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_CPU_CLOCK,
		.exclude_kernel = 1,
	};
	struct cpu_map *cpus;
	int cpu, fd;

	cpus = cpu_map__new(NULL);
	if (!cpus)
		return false;
	cpu = cpus->map[0];
	cpu_map__put(cpus);

	fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
	if (fd < 0)
		return false;
	close(fd);

	return true;
}

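/*
 * Apply the record options to every event in the evlist and decide whether
 * PERF_SAMPLE_IDENTIFIER is needed to tell the events' samples apart.
 */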
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each(evlist, evsel) {
		perf_evsel__config(evsel, opts);
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		evlist__for_each(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		evlist__for_each(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		evlist__for_each(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}

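/* Read the current kernel.perf_event_max_sample_rate sysctl value. */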
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}

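/*
 * Resolve the user-supplied period/frequency options against each other and
 * against the kernel's maximum sample rate, erroring out or clamping as
 * needed.
 */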
static int record_opts__config_freq(struct record_opts *opts)
{
	bool user_freq = opts->user_freq != UINT_MAX;
	unsigned int max_rate;

	if (opts->user_interval != ULLONG_MAX)
		opts->default_interval = opts->user_interval;
	if (user_freq)
		opts->freq = opts->user_freq;

	/*
	 * User specified count overrides default frequency.
	 */
	if (opts->default_interval)
		opts->freq = 0;
	else if (opts->freq) {
		opts->default_interval = opts->freq;
	} else {
		pr_err("frequency and count are zero, aborting\n");
		return -1;
	}

	if (get_max_rate(&max_rate))
		return 0;

	/*
	 * User specified frequency is over current maximum.
	 */
	if (user_freq && (max_rate < opts->freq)) {
		pr_err("Maximum frequency rate (%u) reached.\n"
		       "Please use -F freq option with lower value or consider\n"
		       "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
		       max_rate);
		return -1;
	}

	/*
	 * Default frequency is over current maximum.
	 */
	if (max_rate < opts->freq) {
		pr_warning("Lowering default frequency rate to %u.\n"
			   "Please consider tweaking "
			   "/proc/sys/kernel/perf_event_max_sample_rate.\n",
			   max_rate);
		opts->freq = max_rate;
	}

	return 0;
}

int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}

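/*
 * Check whether the last event parsed from 'str' can actually be opened on
 * a CPU the evlist targets (or on the first CPU of a fresh cpu map, falling
 * back to CPU 0, when the evlist has no CPU map).
 */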
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
	struct perf_evlist *temp_evlist;
	struct perf_evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = perf_evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	evsel = perf_evlist__last(temp_evlist);

	if (!evlist || cpu_map__empty(evlist->cpus)) {
		struct cpu_map *cpus = cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		cpu_map__put(cpus);
	} else {
		cpu = evlist->cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	perf_evlist__delete(temp_evlist);
	return ret;
}