// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, EPOLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *rb = handle->rb;

	preempt_disable();

	/*
	 * Avoid an explicit LOAD/STORE such that architectures with memops
	 * can use them.
	 */
	(*(volatile unsigned int *)&rb->nest)++;
	handle->wakeup = local_read(&rb->wakeup);
}
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct perf_buffer *rb = handle->rb;
	unsigned long head;
	unsigned int nest;

	/*
	 * If this isn't the outermost nesting, we don't have to update
	 * @rb->user_page->data_head.
	 */
	nest = READ_ONCE(rb->nest);
	if (nest > 1) {
		WRITE_ONCE(rb->nest, nest - 1);
		goto out;
	}

again:
	/*
	 * In order to avoid publishing a head value that goes backwards,
	 * we must ensure the load of @rb->head happens after we've
	 * incremented @rb->nest.
	 *
	 * Otherwise we can observe a @rb->head value before one published
	 * by an IRQ/NMI happening between the load and the increment.
	 */
	barrier();
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here and advance @rb->head, causing our
	 * load above to be stale.
	 */

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	WRITE_ONCE(rb->user_page->data_head, head);

	/*
	 * We must publish the head before decrementing the nest count,
	 * otherwise an IRQ/NMI can publish a more recent head value and our
	 * write will (temporarily) publish a stale value.
	 */
	barrier();
	WRITE_ONCE(rb->nest, 0);

	/*
	 * Ensure we decrement @rb->nest before we validate the @rb->head.
	 * Otherwise we cannot be sure we caught the 'last' nested update.
	 */
	barrier();
	if (unlikely(head != local_read(&rb->head))) {
		WRITE_ONCE(rb->nest, 1);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
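
/*
 * Illustrative example (not from the original source): with a 4096-byte data
 * area, head == 4000 and tail == 100, a forward buffer has
 * CIRC_SPACE(4000, 100, 4096) == 195 bytes free (CIRC_SPACE always keeps one
 * byte unused so head == tail unambiguously means "empty"), so a 256-byte
 * record does not fit and the caller takes the fail path.
 */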
static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct perf_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}
unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
static void
ring_buffer_init(struct perf_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	refcount_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}
void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct perf_buffer *rb;
	unsigned int nest;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!refcount_inc_not_zero(&rb->aux_refcount))
		goto err;

	nest = READ_ONCE(rb->aux_nest);
	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(nest))
		goto err_put;

	WRITE_ONCE(rb->aux_nest, nest + 1);

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = READ_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = smp_processor_id();
			perf_output_wakeup(handle);
			WRITE_ONCE(rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
static __always_inline bool rb_need_aux_wakeup(struct perf_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct perf_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	/*
	 * Only send RECORD_AUX if we have something useful to communicate
	 *
	 * Note: the OVERWRITE records by themselves are not considered
	 * useful, as they don't communicate any *new* information,
	 * aside from the short-lived offset, that becomes history at
	 * the next event sched-in and therefore isn't useful.
	 * The userspace that needs to copy out AUX data in overwrite
	 * mode should know to use user_page::aux_head for the actual
	 * offset. So, from now on we don't output AUX records that
	 * have *only* OVERWRITE flag set.
	 */
	if (size || (handle->aux_flags & ~(u64)PERF_AUX_FLAG_OVERWRITE))
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = smp_processor_id();
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	WRITE_ONCE(rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct perf_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);
void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);
/*
 * Copy out AUX data from an AUX handle.
 */
long perf_output_copy_aux(struct perf_output_handle *aux_handle,
			  struct perf_output_handle *handle,
			  unsigned long from, unsigned long to)
{
	struct perf_buffer *rb = aux_handle->rb;
	unsigned long tocopy, remainder, len = 0;
	void *addr;

	from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
	to &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;

	do {
		tocopy = PAGE_SIZE - offset_in_page(from);
		if (to > from)
			tocopy = min(tocopy, to - from);
		if (!tocopy)
			break;

		addr = rb->aux_pages[from >> PAGE_SHIFT];
		addr += offset_in_page(from);

		remainder = perf_output_copy(handle, addr, tocopy);
		if (remainder)
			return -EFAULT;

		len += tocopy;
		from += tocopy;
		from &= (rb->aux_nr_pages << PAGE_SHIFT) - 1;
	} while (to != from);

	return len;
}
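
/*
 * Worked example of the wrap-around arithmetic above (illustrative): with
 * aux_nr_pages == 4 and 4 KiB pages the mask is 0x3fff, so a copy from
 * from == 0x3f00 to to == 0x80 is done in two chunks: 0x100 bytes up to the
 * end of the area (after which @from wraps to 0), then 0x80 bytes from
 * offset 0, for 0x180 bytes total.
 */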
#define PERF_AUX_GFP  (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
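
/*
 * A caller that needs to know how large a contiguous chunk it got back can
 * recover the order roughly like this (illustrative sketch; it mirrors what
 * rb_alloc_aux() below does via page_private()):
 *
 *	struct page *page = virt_to_page(rb->aux_pages[idx]);
 *	int order = PagePrivate(page) ? page_private(page) : 0;
 *	// the chunk starting at aux_pages[idx] spans (1 << order) pages
 */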
static void rb_free_aux_page(struct perf_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}
static void __rb_free_aux(struct perf_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}
int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	/*
	 * We need to start with the max_order that fits in nr_pages,
	 * not the other way around, hence ilog2() and not get_order.
	 */
	max_order = ilog2(nr_pages);

	/*
	 * PMU requests more than one contiguous chunk of memory
	 * for SW double buffering
	 */
	if (!overwrite) {
		if (!max_order)
			return -EINVAL;

		max_order--;
	}

	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
				     node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	refcount_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}
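
/*
 * Worked example of the sizing above (illustrative): for a non-overwrite
 * mapping of nr_pages == 64, max_order starts at ilog2(64) == 6 and is then
 * lowered to 5 so the PMU gets at least two chunks for software double
 * buffering. With no explicit watermark, the default becomes
 * nr_pages << (PAGE_SHIFT - 1), i.e. half of the AUX area in bytes
 * (128 KiB with 4 KiB pages).
 */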
void rb_free_aux(struct perf_buffer *rb)
{
	if (refcount_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}
static void perf_mmap_free_page(void *addr)
{
	struct page *page = virt_to_page(addr);

	page->mapping = NULL;
	__free_page(page);
}
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct perf_buffer);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
		goto fail;

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		perf_mmap_free_page(rb->data_pages[i]);

	perf_mmap_free_page(rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}
void rb_free(struct perf_buffer *rb)
{
	int i;

	perf_mmap_free_page(rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page(rb->data_pages[i]);
	kfree(rb);
}
#else

static int data_page_nr(struct perf_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}
static struct page *
__perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}
static void rb_free_work(struct work_struct *work)
{
	struct perf_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct perf_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}
void rb_free(struct perf_buffer *rb)
{
	schedule_work(&rb->work);
}
struct perf_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct perf_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct perf_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
struct page *
perf_mmap_to_page(struct perf_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}