Linux 4.9.151 - kernel/events/ring_buffer.c
/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
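/*
 * Check whether a record of @size bytes fits in the data area. A forward
 * buffer advances @head towards @tail, a backward buffer moves the other
 * way, hence the swapped CIRC_SPACE() arguments.
 */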
static bool __always_inline
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
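/*
 * Reserve @size bytes in @event's ring buffer and set up @handle so the
 * caller can fill them in with perf_output_copy()/perf_output_put().
 * Returns 0 on success or -ENOSPC when there is no room; in that case the
 * lost-record count is bumped and reported later via PERF_RECORD_LOST.
 */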
static int __always_inline
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}
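/*
 * Copy or skip @len bytes within the space reserved by perf_output_begin();
 * the __output_copy()/__output_skip() helpers advance handle->addr and
 * handle->size across page boundaries as needed.
 */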
unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = local_read(&rb->aux_head);

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
			 bool truncated)
{
	struct ring_buffer *rb = handle->rb;
	bool wakeup = truncated;
	unsigned long aux_head;
	u64 flags = 0;

	if (truncated)
		flags |= PERF_AUX_FLAG_TRUNCATED;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		local_set(&rb->aux_head, aux_head);
	} else {
		aux_head = local_read(&rb->aux_head);
		local_add(size, &rb->aux_head);
	}

	if (size || flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */

		perf_event_aux_event(handle->event, aux_head, size, flags);
	}

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		wakeup = true;
		local_add(rb->aux_watermark, &rb->aux_wakeup);
	}

	if (wakeup) {
		if (truncated)
			handle->event->pending_disable = 1;
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	if (size > handle->size)
		return -ENOSPC;

	local_add(size, &rb->aux_head);

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
		handle->wakeup = local_read(&rb->aux_wakeup) +
				 rb->aux_watermark;
	}

	handle->head = aux_head;
	handle->size -= size;

	return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
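/*
 * AUX pages are allocated with __GFP_NOWARN | __GFP_NORETRY so that failed
 * high-order attempts stay quiet; rb_alloc_aux_page() falls back to smaller
 * orders on failure.
 */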
#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}
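/*
 * Allocate the AUX area: @nr_pages pages, preferring high-order contiguous
 * chunks when the PMU cannot scatter-gather (PERF_PMU_CAP_AUX_NO_SG), then
 * hand the page list to the pmu driver via pmu->setup_aux().
 */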
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -ENOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * PMU requests more than one contiguous chunk of memory
		 * for SW double buffering
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}
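/*
 * Two backing stores for the main buffer: the default below allocates the
 * user page and each data page separately, while the CONFIG_PERF_USE_VMALLOC
 * variant further down maps everything out of a single vmalloc_user() area.
 */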
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}
#else
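/*
 * CONFIG_PERF_USE_VMALLOC: the whole buffer is one virtually contiguous
 * allocation; rb->nr_pages stays 1 and rb->page_order encodes the real
 * number of data pages, which data_page_nr() recovers.
 */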
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}
#endif
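/*
 * mmap() page lookup: pgoff 0 is the user control page, the data pages
 * follow, and offsets from rb->aux_pgoff onwards map the AUX area. The AUX
 * index is sanitized with array_index_nospec() since @pgoff is user
 * controlled.
 */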
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}