// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <sys/mman.h>
#include <errno.h>
#include <linux/zalloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // sysconf()
#include <perf/mmap.h>
#ifdef HAVE_LIBNUMA_SUPPORT
#include <numaif.h>
#endif
#include "cpumap.h"
#include "debug.h"
#include "event.h"
#include "mmap.h"
#include "../perf.h"
#include <internal/lib.h> /* page_size */
#include <linux/bitmap.h>

#define MASK_SIZE 1023
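/*
 * Print @mask->bits as a hex bitmap string via pr_debug(), prefixed with @tag,
 * e.g. to dump a per-mmap affinity mask.
 */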
void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
{
	char buf[MASK_SIZE + 1];
	size_t len;

	len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
	buf[len] = '\0';
	pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
}

size_t mmap__mmap_len(struct mmap *map)
{
	return perf_mmap__mmap_len(&map->core);
}

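/*
 * Weak stubs for the auxtrace mmap hooks: builds without auxtrace support fall
 * back to these no-ops, while the strong definitions override them otherwise.
 */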
int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
			       struct auxtrace_mmap_params *mp __maybe_unused,
			       void *userpg __maybe_unused,
			       int fd __maybe_unused)
{
	return 0;
}

void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
{
}

void __weak auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp __maybe_unused,
				       off_t auxtrace_offset __maybe_unused,
				       unsigned int auxtrace_pages __maybe_unused,
				       bool auxtrace_overwrite __maybe_unused)
{
}

void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __maybe_unused,
					  struct evlist *evlist __maybe_unused,
					  int idx __maybe_unused,
					  bool per_cpu __maybe_unused)
{
}

#ifdef HAVE_AIO_SUPPORT
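/*
 * With AIO support (e.g. 'perf record --aio'), each mmap gets nr_cblocks data
 * buffers and control blocks so ring buffer contents can be written out
 * asynchronously while new samples keep arriving.
 */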
static int perf_mmap__aio_enabled(struct mmap *map)
{
	return map->aio.nr_cblocks > 0;
}

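/*
 * When libnuma is available the AIO buffers are allocated with mmap() so they
 * can be mbind()'ed to the NUMA node of the profiled CPU; otherwise they are
 * plain malloc()'ed and binding is a no-op.
 */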
#ifdef HAVE_LIBNUMA_SUPPORT
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				  MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
	if (map->aio.data[idx] == MAP_FAILED) {
		map->aio.data[idx] = NULL;
		return -1;
	}

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	if (map->aio.data[idx]) {
		munmap(map->aio.data[idx], mmap__mmap_len(map));
		map->aio.data[idx] = NULL;
	}
}

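/*
 * Bind the idx-th AIO buffer to the NUMA node of @cpu with mbind(), but only
 * when an affinity mode other than PERF_AFFINITY_SYS was requested and the
 * machine actually has more than one node.
 */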
static int perf_mmap__aio_bind(struct mmap *map, int idx, int cpu, int affinity)
{
	void *data;
	size_t mmap_len;
	unsigned long *node_mask;
	unsigned long node_index;
	int err = 0;

	if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
		data = map->aio.data[idx];
		mmap_len = mmap__mmap_len(map);
		node_index = cpu__get_node(cpu);
		node_mask = bitmap_alloc(node_index + 1);
		if (!node_mask) {
			pr_err("Failed to allocate node mask for mbind: error %m\n");
			return -1;
		}
		set_bit(node_index, node_mask);
		if (mbind(data, mmap_len, MPOL_BIND, node_mask, node_index + 1 + 1, 0)) {
			pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
			       data, data + mmap_len, node_index);
			err = -1;
		}
		bitmap_free(node_mask);
	}

	return err;
}
#else /* !HAVE_LIBNUMA_SUPPORT */
static int perf_mmap__aio_alloc(struct mmap *map, int idx)
{
	map->aio.data[idx] = malloc(mmap__mmap_len(map));
	if (map->aio.data[idx] == NULL)
		return -1;

	return 0;
}

static void perf_mmap__aio_free(struct mmap *map, int idx)
{
	zfree(&(map->aio.data[idx]));
}

static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
		int cpu __maybe_unused, int affinity __maybe_unused)
{
	return 0;
}
#endif

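/*
 * Allocate the per-mmap AIO state: nr_cblocks control blocks, one data buffer
 * per block (NUMA-bound when possible), and descending aio_reqprio values so
 * newer requests land on shorter per-priority queues.
 */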
static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
{
	int delta_max, i, prio, ret;

	map->aio.nr_cblocks = mp->nr_cblocks;
	if (map->aio.nr_cblocks) {
		map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
		if (!map->aio.aiocb) {
			pr_debug2("failed to allocate aiocb for data buffer, error %m\n");
			return -1;
		}
		map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
		if (!map->aio.cblocks) {
			pr_debug2("failed to allocate cblocks for data buffer, error %m\n");
			return -1;
		}
		map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
		if (!map->aio.data) {
			pr_debug2("failed to allocate data buffer, error %m\n");
			return -1;
		}
		delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
		for (i = 0; i < map->aio.nr_cblocks; ++i) {
			ret = perf_mmap__aio_alloc(map, i);
			if (ret == -1) {
				pr_debug2("failed to allocate data buffer area, error %m");
				return -1;
			}
			ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
			if (ret == -1)
				return -1;
			/*
			 * A cblock.aio_fildes value different from -1 denotes a
			 * started aio write operation on the cblock, so an
			 * explicit record__aio_sync() call is required before
			 * the cblock may be reused again.
			 */
			map->aio.cblocks[i].aio_fildes = -1;
			/*
			 * Allocate cblocks with a priority delta to get faster
			 * aio write system calls: queued requests are kept in
			 * separate per-priority queues, so adding a new request
			 * iterates over a shorter per-priority list. Blocks with
			 * indexes higher than _SC_AIO_PRIO_DELTA_MAX get
			 * priority 0.
			 */
			prio = delta_max - i;
			map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
		}
	}

	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map)
{
	int i;

	for (i = 0; i < map->aio.nr_cblocks; ++i)
		perf_mmap__aio_free(map, i);
	if (map->aio.data)
		zfree(&map->aio.data);
	zfree(&map->aio.cblocks);
	zfree(&map->aio.aiocb);
}
#else /* !HAVE_AIO_SUPPORT */
static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
{
	return 0;
}

static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
			       struct mmap_params *mp __maybe_unused)
{
	return 0;
}

static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
{
}
#endif

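/*
 * Undo mmap__mmap(): free the affinity mask, the AIO state, the compression
 * scratch buffer and the auxtrace mapping.
 */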
void mmap__munmap(struct mmap *map)
{
	bitmap_free(map->affinity_mask.bits);

	perf_mmap__aio_munmap(map);
	if (map->data != NULL) {
		munmap(map->data, mmap__mmap_len(map));
		map->data = NULL;
	}
	auxtrace_mmap__munmap(&map->auxtrace_mmap);
}

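/* Set a bit in @mask for every online CPU that belongs to NUMA node @node. */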
static void build_node_mask(int node, struct mmap_cpu_mask *mask)
{
	int c, cpu, nr_cpus;
	const struct perf_cpu_map *cpu_map = NULL;

	cpu_map = cpu_map__online();
	if (!cpu_map)
		return;

	nr_cpus = perf_cpu_map__nr(cpu_map);
	for (c = 0; c < nr_cpus; c++) {
		cpu = cpu_map->map[c]; /* map c index to online cpu index */
		if (cpu__get_node(cpu) == node)
			set_bit(cpu, mask->bits);
	}
}

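/*
 * Build this map's affinity mask: every CPU of the map's NUMA node for
 * PERF_AFFINITY_NODE, or just the map's own CPU for PERF_AFFINITY_CPU.
 */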
static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
{
	map->affinity_mask.nbits = cpu__max_cpu();
	map->affinity_mask.bits = bitmap_alloc(map->affinity_mask.nbits);
	if (!map->affinity_mask.bits)
		return -1;

	if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
		build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
	else if (mp->affinity == PERF_AFFINITY_CPU)
		set_bit(map->core.cpu, map->affinity_mask.bits);

	return 0;
}

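/*
 * Map the perf event ring buffer for @fd and set up the optional pieces:
 * affinity mask, compression scratch buffer, auxtrace area and AIO buffers.
 */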
int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
{
	if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		return -1;
	}

	if (mp->affinity != PERF_AFFINITY_SYS &&
	    perf_mmap__setup_affinity_mask(map, mp)) {
		pr_debug2("failed to alloc mmap affinity mask, error %d\n",
			  errno);
		return -1;
	}

	if (verbose == 2)
		mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");

	map->core.flush = mp->flush;

	map->comp_level = mp->comp_level;

	if (map->comp_level && !perf_mmap__aio_enabled(map)) {
		map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
				 MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
		if (map->data == MAP_FAILED) {
			pr_debug2("failed to mmap data buffer, error %d\n",
				  errno);
			map->data = NULL;
			return -1;
		}
	}

	if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
				&mp->auxtrace_mp, map->core.base, fd))
		return -1;

	return perf_mmap__aio_mmap(map, mp);
}

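/*
 * Drain the data between core.start and core.end into @to via @push(),
 * splitting it into two chunks when the region wraps around the end of the
 * ring buffer. Returns 0 on success, 1 if there was nothing to read (-EAGAIN)
 * and -1 on error.
 */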
int perf_mmap__push(struct mmap *md, void *to,
		    int push(struct mmap *map, void *to, void *buf, size_t size))
{
	u64 head = perf_mmap__read_head(&md->core);
	unsigned char *data = md->core.base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	rc = perf_mmap__read_init(&md->core);
	if (rc < 0)
		return (rc == -EAGAIN) ? 1 : -1;

	size = md->core.end - md->core.start;

	if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
		buf = &data[md->core.start & md->core.mask];
		size = md->core.mask + 1 - (md->core.start & md->core.mask);
		md->core.start += size;

		if (push(md, to, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[md->core.start & md->core.mask];
	size = md->core.end - md->core.start;
	md->core.start += size;

	if (push(md, to, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->core.prev = head;
	perf_mmap__consume(&md->core);
out:
	return rc;
}