Merge tag 'block-5.9-2020-08-14' of git://git.kernel.dk/linux-block
[linux/fpc-iii.git] / tools / perf / util / record.c
bloba4cc11592f6b38ebf1fb86a31da6e9920b5c6187
1 // SPDX-License-Identifier: GPL-2.0
2 #include "debug.h"
3 #include "evlist.h"
4 #include "evsel.h"
5 #include "parse-events.h"
6 #include <errno.h>
7 #include <limits.h>
8 #include <stdlib.h>
9 #include <api/fs/fs.h>
10 #include <subcmd/parse-options.h>
11 #include <perf/cpumap.h>
12 #include "cloexec.h"
13 #include "util/perf_api_probe.h"
14 #include "record.h"
15 #include "../perf-sys.h"
18 * evsel__config_leader_sampling() uses special rules for leader sampling.
19 * However, if the leader is an AUX area event, then assume the event to sample
20 * is the next event.
22 static struct evsel *evsel__read_sampler(struct evsel *evsel, struct evlist *evlist)
24 struct evsel *leader = evsel->leader;
26 if (evsel__is_aux_event(leader)) {
27 evlist__for_each_entry(evlist, evsel) {
28 if (evsel->leader == leader && evsel != evsel->leader)
29 return evsel;
33 return leader;
36 static void evsel__config_leader_sampling(struct evsel *evsel, struct evlist *evlist)
38 struct perf_event_attr *attr = &evsel->core.attr;
39 struct evsel *leader = evsel->leader;
40 struct evsel *read_sampler;
42 if (!leader->sample_read)
43 return;
45 read_sampler = evsel__read_sampler(evsel, evlist);
47 if (evsel == read_sampler)
48 return;
51 * Disable sampling for all group members other than the leader in
52 * case the leader 'leads' the sampling, except when the leader is an
53 * AUX area event, in which case the 2nd event in the group is the one
54 * that 'leads' the sampling.
56 attr->freq = 0;
57 attr->sample_freq = 0;
58 attr->sample_period = 0;
59 attr->write_backward = 0;
62 * We don't get a sample for slave events, we make them when delivering
63 * the group leader sample. Set the slave event to follow the master
64 * sample_type to ease up reporting.
65 * An AUX area event also has sample_type requirements, so also include
66 * the sample type bits from the leader's sample_type to cover that
67 * case.
69 attr->sample_type = read_sampler->core.attr.sample_type |
70 leader->core.attr.sample_type;
73 void perf_evlist__config(struct evlist *evlist, struct record_opts *opts,
74 struct callchain_param *callchain)
76 struct evsel *evsel;
77 bool use_sample_identifier = false;
78 bool use_comm_exec;
79 bool sample_id = opts->sample_id;
82 * Set the evsel leader links before we configure attributes,
83 * since some might depend on this info.
85 if (opts->group)
86 perf_evlist__set_leader(evlist);
88 if (evlist->core.cpus->map[0] < 0)
89 opts->no_inherit = true;
91 use_comm_exec = perf_can_comm_exec();
93 evlist__for_each_entry(evlist, evsel) {
94 evsel__config(evsel, opts, callchain);
95 if (evsel->tracking && use_comm_exec)
96 evsel->core.attr.comm_exec = 1;
99 /* Configure leader sampling here now that the sample type is known */
100 evlist__for_each_entry(evlist, evsel)
101 evsel__config_leader_sampling(evsel, evlist);
103 if (opts->full_auxtrace) {
105 * Need to be able to synthesize and parse selected events with
106 * arbitrary sample types, which requires always being able to
107 * match the id.
109 use_sample_identifier = perf_can_sample_identifier();
110 sample_id = true;
111 } else if (evlist->core.nr_entries > 1) {
112 struct evsel *first = evlist__first(evlist);
114 evlist__for_each_entry(evlist, evsel) {
115 if (evsel->core.attr.sample_type == first->core.attr.sample_type)
116 continue;
117 use_sample_identifier = perf_can_sample_identifier();
118 break;
120 sample_id = true;
123 if (sample_id) {
124 evlist__for_each_entry(evlist, evsel)
125 evsel__set_sample_id(evsel, use_sample_identifier);
128 perf_evlist__set_id_pos(evlist);
/*
 * Read the kernel's maximum sampling rate (Hz) into *rate.
 * Returns the sysctl read result: 0 on success, non-zero on failure.
 */
static int get_max_rate(unsigned int *rate)
{
	const char *path = "kernel/perf_event_max_sample_rate";

	return sysctl__read_int(path, (int *)rate);
}
136 static int record_opts__config_freq(struct record_opts *opts)
138 bool user_freq = opts->user_freq != UINT_MAX;
139 unsigned int max_rate;
141 if (opts->user_interval != ULLONG_MAX)
142 opts->default_interval = opts->user_interval;
143 if (user_freq)
144 opts->freq = opts->user_freq;
147 * User specified count overrides default frequency.
149 if (opts->default_interval)
150 opts->freq = 0;
151 else if (opts->freq) {
152 opts->default_interval = opts->freq;
153 } else {
154 pr_err("frequency and count are zero, aborting\n");
155 return -1;
158 if (get_max_rate(&max_rate))
159 return 0;
162 * User specified frequency is over current maximum.
164 if (user_freq && (max_rate < opts->freq)) {
165 if (opts->strict_freq) {
166 pr_err("error: Maximum frequency rate (%'u Hz) exceeded.\n"
167 " Please use -F freq option with a lower value or consider\n"
168 " tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
169 max_rate);
170 return -1;
171 } else {
172 pr_warning("warning: Maximum frequency rate (%'u Hz) exceeded, throttling from %'u Hz to %'u Hz.\n"
173 " The limit can be raised via /proc/sys/kernel/perf_event_max_sample_rate.\n"
174 " The kernel will lower it when perf's interrupts take too long.\n"
175 " Use --strict-freq to disable this throttling, refusing to record.\n",
176 max_rate, opts->freq, max_rate);
178 opts->freq = max_rate;
183 * Default frequency is over current maximum.
185 if (max_rate < opts->freq) {
186 pr_warning("Lowering default frequency rate to %u.\n"
187 "Please consider tweaking "
188 "/proc/sys/kernel/perf_event_max_sample_rate.\n",
189 max_rate);
190 opts->freq = max_rate;
193 return 0;
/*
 * Public entry point for validating/normalizing record options.
 * Currently only frequency/period configuration needs resolving.
 * Returns 0 on success, -1 on error.
 */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
201 bool perf_evlist__can_select_event(struct evlist *evlist, const char *str)
203 struct evlist *temp_evlist;
204 struct evsel *evsel;
205 int err, fd, cpu;
206 bool ret = false;
207 pid_t pid = -1;
209 temp_evlist = evlist__new();
210 if (!temp_evlist)
211 return false;
213 err = parse_events(temp_evlist, str, NULL);
214 if (err)
215 goto out_delete;
217 evsel = evlist__last(temp_evlist);
219 if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
220 struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
222 cpu = cpus ? cpus->map[0] : 0;
223 perf_cpu_map__put(cpus);
224 } else {
225 cpu = evlist->core.cpus->map[0];
228 while (1) {
229 fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, -1,
230 perf_event_open_cloexec_flag());
231 if (fd < 0) {
232 if (pid == -1 && errno == EACCES) {
233 pid = 0;
234 continue;
236 goto out_delete;
238 break;
240 close(fd);
241 ret = true;
243 out_delete:
244 evlist__delete(temp_evlist);
245 return ret;
248 int record__parse_freq(const struct option *opt, const char *str, int unset __maybe_unused)
250 unsigned int freq;
251 struct record_opts *opts = opt->value;
253 if (!str)
254 return -EINVAL;
256 if (strcasecmp(str, "max") == 0) {
257 if (get_max_rate(&freq)) {
258 pr_err("couldn't read /proc/sys/kernel/perf_event_max_sample_rate\n");
259 return -1;
261 pr_info("info: Using a maximum frequency rate of %'d Hz\n", freq);
262 } else {
263 freq = atoi(str);
266 opts->user_freq = freq;
267 return 0;