treewide: remove redundant IS_ERR() before error code check
tools/perf/util/mmap.c
blob 3b664fa673a6ce031b492e84ad2964c3814fb833
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
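/*
 * Dump the mask's bitmap as a hex string via pr_debug(), prefixed with
 * the given tag. Used to inspect mmap affinity masks at high verbosity.
 */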
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}
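/*
 * Default no-op implementations of the auxtrace mmap hooks; the strong
 * definitions take over when AUX area tracing support is built in.
 */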
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}
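/*
 * Bind the AIO data buffer to the memory node of the CPU this mmap is
 * associated with, so aio writes operate on node-local memory.
 */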
static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long node_mask;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_mask = 1UL << cpu__get_node(cpu);
		if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
			       data, data + mmap_len, cpu__get_node(cpu));
			return -1;
		}
	}

	return 0;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
			       int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif
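/*
 * Set up the per-mmap AIO state: one control block plus one data buffer
 * per requested in-flight aio write (mp->nr_cblocks of each).
 */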
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * Use a cblock.aio_fildes value different from -1
			 * to denote a started aio write operation on the
			 * cblock, so an explicit record__aio_sync() call
			 * is required before the cblock may be reused.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with decreasing priority deltas
			 * to get faster aio write system calls: queued
			 * requests are kept in separate per-prio queues,
			 * so adding a new request iterates through a
			 * shorter per-prio list. Blocks with numbers
			 * higher than _SC_AIO_PRIO_DELTA_MAX go with
			 * priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}
static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif
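/*
 * Release everything mmap__mmap() set up: affinity mask, AIO state,
 * compression data buffer and AUX area mapping.
 */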
void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
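/*
 * Set the bits of all online CPUs that belong to the given NUMA node
 * in the supplied mask.
 */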
static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			set_bit(cpu, mask->bits);
	}
}
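/*
 * Compute the set of CPUs the reading thread may run on while draining
 * this mmap: all CPUs of the buffer's node for PERF_AFFINITY_NODE, or
 * just the buffer's own CPU for PERF_AFFINITY_CPU.
 */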
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu();
	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		set_bit(map->core.cpu, map->affinity_mask.bits);

	return 0;
}
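/*
 * Map the perf event ring buffer for the given fd, then set up the
 * optional affinity mask, compression buffer, AUX area and AIO state.
 */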
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;
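	/*
	 * When compressing without aio, the anonymous buffer below serves
	 * as scratch space for compressed records; with aio enabled the
	 * aio data buffers are used instead.
	 */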
	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}
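/*
 * Drain all pending data between 'start' and 'end' through the push()
 * callback. When the range wraps past the end of the ring buffer it is
 * pushed as two chunks: the tail of the buffer first, then the rest
 * from the beginning.
 */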
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}