1 // SPDX-License-Identifier: GPL-2.0
5 #include "evsel_config.h"
6 #include "parse-events.h"
10 #include <api/fs/fs.h>
11 #include <subcmd/parse-options.h>
12 #include <perf/cpumap.h>
14 #include "util/perf_api_probe.h"
16 #include "../perf-sys.h"
18 #include "map_symbol.h"
19 #include "mem-events.h"
22 * evsel__config_leader_sampling() uses special rules for leader sampling.
23 * However, if the leader is an AUX area event, then assume the event to sample
26 static struct evsel
*evsel__read_sampler(struct evsel
*evsel
, struct evlist
*evlist
)
28 struct evsel
*leader
= evsel__leader(evsel
);
30 if (evsel__is_aux_event(leader
) || arch_topdown_sample_read(leader
) ||
31 is_mem_loads_aux_event(leader
)) {
32 evlist__for_each_entry(evlist
, evsel
) {
33 if (evsel__leader(evsel
) == leader
&& evsel
!= evsel__leader(evsel
))
41 static u64
evsel__config_term_mask(struct evsel
*evsel
)
43 struct evsel_config_term
*term
;
44 struct list_head
*config_terms
= &evsel
->config_terms
;
47 list_for_each_entry(term
, config_terms
, list
) {
48 term_types
|= 1 << term
->type
;
53 static void evsel__config_leader_sampling(struct evsel
*evsel
, struct evlist
*evlist
)
55 struct perf_event_attr
*attr
= &evsel
->core
.attr
;
56 struct evsel
*leader
= evsel__leader(evsel
);
57 struct evsel
*read_sampler
;
58 u64 term_types
, freq_mask
;
60 if (!leader
->sample_read
)
63 read_sampler
= evsel__read_sampler(evsel
, evlist
);
65 if (evsel
== read_sampler
)
68 term_types
= evsel__config_term_mask(evsel
);
70 * Disable sampling for all group members except those with explicit
71 * config terms or the leader. In the case of an AUX area event, the 2nd
72 * event in the group is the one that 'leads' the sampling.
74 freq_mask
= (1 << EVSEL__CONFIG_TERM_FREQ
) | (1 << EVSEL__CONFIG_TERM_PERIOD
);
75 if ((term_types
& freq_mask
) == 0) {
77 attr
->sample_freq
= 0;
78 attr
->sample_period
= 0;
80 if ((term_types
& (1 << EVSEL__CONFIG_TERM_OVERWRITE
)) == 0)
81 attr
->write_backward
= 0;
84 * We don't get a sample for slave events, we make them when delivering
85 * the group leader sample. Set the slave event to follow the master
86 * sample_type to ease up reporting.
87 * An AUX area event also has sample_type requirements, so also include
88 * the sample type bits from the leader's sample_type to cover that
91 attr
->sample_type
= read_sampler
->core
.attr
.sample_type
|
92 leader
->core
.attr
.sample_type
;
95 void evlist__config(struct evlist
*evlist
, struct record_opts
*opts
, struct callchain_param
*callchain
)
98 bool use_sample_identifier
= false;
100 bool sample_id
= opts
->sample_id
;
102 if (perf_cpu_map__cpu(evlist
->core
.user_requested_cpus
, 0).cpu
< 0)
103 opts
->no_inherit
= true;
105 use_comm_exec
= perf_can_comm_exec();
107 evlist__for_each_entry(evlist
, evsel
) {
108 evsel__config(evsel
, opts
, callchain
);
109 if (evsel
->tracking
&& use_comm_exec
)
110 evsel
->core
.attr
.comm_exec
= 1;
113 /* Configure leader sampling here now that the sample type is known */
114 evlist__for_each_entry(evlist
, evsel
)
115 evsel__config_leader_sampling(evsel
, evlist
);
117 if (opts
->full_auxtrace
|| opts
->sample_identifier
) {
119 * Need to be able to synthesize and parse selected events with
120 * arbitrary sample types, which requires always being able to
123 use_sample_identifier
= perf_can_sample_identifier();
125 } else if (evlist
->core
.nr_entries
> 1) {
126 struct evsel
*first
= evlist__first(evlist
);
128 evlist__for_each_entry(evlist
, evsel
) {
129 if (evsel
->core
.attr
.sample_type
== first
->core
.attr
.sample_type
)
131 use_sample_identifier
= perf_can_sample_identifier();
138 evlist__for_each_entry(evlist
, evsel
)
139 evsel__set_sample_id(evsel
, use_sample_identifier
);
142 evlist__set_id_pos(evlist
);
/*
 * Read the kernel's maximum allowed sampling frequency (Hz) from
 * /proc/sys/kernel/perf_event_max_sample_rate into *rate.
 * Returns 0 on success, non-zero on failure (per sysctl__read_int).
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
150 static int record_opts__config_freq(struct record_opts
*opts
)
152 bool user_freq
= opts
->user_freq
!= UINT_MAX
;
153 bool user_interval
= opts
->user_interval
!= ULLONG_MAX
;
154 unsigned int max_rate
;
156 if (user_interval
&& user_freq
) {
157 pr_err("cannot set frequency and period at the same time\n");
162 opts
->default_interval
= opts
->user_interval
;
164 opts
->freq
= opts
->user_freq
;
167 * User specified count overrides default frequency.
169 if (opts
->default_interval
)
171 else if (opts
->freq
) {
172 opts
->default_interval
= opts
->freq
;
174 pr_err("frequency and count are zero, aborting\n");
178 if (get_max_rate(&max_rate
))
182 * User specified frequency is over current maximum.
184 if (user_freq
&& (max_rate
< opts
->freq
)) {
185 if (opts
->strict_freq
) {
186 pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
187 " Please use -F freq option with a lower value or consider\n"
188 " tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
192 pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
193 " The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
194 " The kernel will lower it when perf's interrupts take too long.\n"
195 " Use --strict-freq to disable this throttling, refusing to record.\n",
196 max_rate
, opts
->freq
, max_rate
);
198 opts
->freq
= max_rate
;
203 * Default frequency is over current maximum.
205 if (max_rate
< opts
->freq
) {
206 pr_warning("Lowering default frequency rate from %u to %u.\n"
207 "Please consider tweaking "
208 "/proc/sys/kernel/perf_event_max_sample_rate.\n",
209 opts
->freq
, max_rate
);
210 opts
->freq
= max_rate
;
/*
 * Validate/finalize record options. Currently only frequency/period
 * configuration needs work; returns its result directly.
 */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
221 bool evlist__can_select_event(struct evlist
*evlist
, const char *str
)
223 struct evlist
*temp_evlist
;
226 struct perf_cpu cpu
= { .cpu
= 0 };
230 temp_evlist
= evlist__new();
234 err
= parse_event(temp_evlist
, str
);
238 evsel
= evlist__last(temp_evlist
);
240 if (!evlist
|| perf_cpu_map__is_any_cpu_or_is_empty(evlist
->core
.user_requested_cpus
)) {
241 struct perf_cpu_map
*cpus
= perf_cpu_map__new_online_cpus();
244 cpu
= perf_cpu_map__cpu(cpus
, 0);
246 perf_cpu_map__put(cpus
);
248 cpu
= perf_cpu_map__cpu(evlist
->core
.user_requested_cpus
, 0);
252 fd
= sys_perf_event_open(&evsel
->core
.attr
, pid
, cpu
.cpu
, -1,
253 perf_event_open_cloexec_flag());
255 if (pid
== -1 && errno
== EACCES
) {
267 evlist__delete(temp_evlist
);
271 int record__parse_freq(const struct option
*opt
, const char *str
, int unset __maybe_unused
)
274 struct record_opts
*opts
= opt
->value
;
279 if (strcasecmp(str
, "max") == 0) {
280 if (get_max_rate(&freq
)) {
281 pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
284 pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq
);
289 opts
->user_freq
= freq
;