/* kernel/events/ring_buffer.c (Linux 4.2.6) */

/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */

static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data, we do not
	 * store it.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
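
/*
 * For illustration, a minimal sketch of the matching userspace consumer side
 * implied by the diagram above. The names pc (the mmap()ed struct
 * perf_event_mmap_page), data and mask are illustrative, and kernel-style
 * barrier/macro names stand in for their userspace equivalents:
 *
 *	u64 tail = pc->data_tail;
 *	u64 head = READ_ONCE(pc->data_head);	// LOAD ->data_head	(C)
 *	smp_rmb();				// pairs with (B)
 *	while (tail != head) {
 *		struct perf_event_header *hdr = data + (tail & mask);
 *		... consume the record ...	// LOAD $data
 *		tail += hdr->size;
 *	}
 *	smp_mb();				// (D), pairs with (A)
 *	WRITE_ONCE(pc->data_tail, tail);	// STORE ->data_tail
 */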

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE_CTRL(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below, since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
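
/*
 * A minimal sketch of how an in-kernel producer typically drives this API.
 * The event, regs and the one-u64 payload are illustrative; real callers
 * build their records via perf_prepare_sample()/perf_output_sample():
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,
 *		.misc = 0,
 *		.size = sizeof(header) + sizeof(u64),
 *	};
 *	u64 ip = instruction_pointer(regs);
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;				// no space; rb->lost was bumped
 *	perf_output_put(&handle, header);	// copies via __output_copy()
 *	perf_output_put(&handle, ip);
 *	perf_output_end(&handle);		// publishes head, may wake readers
 */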

static void rb_irq_work(struct irq_work *work);

static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
	init_irq_work(&rb->irq_work, rb_irq_work);
}

static void ring_buffer_put_async(struct ring_buffer *rb)
{
	if (!atomic_dec_and_test(&rb->refcount))
		return;

	rb->rcu_head.next = (void *)rb;
	irq_work_queue(&rb->irq_work);
}

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * Nesting is not supported for the AUX area; make sure nested
	 * writers are caught early.
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = local_read(&rb->aux_head);

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on the aux_tail load; this
		 * forms a control dependency barrier separating the aux_tail
		 * load from the aux data stores that will be enabled on
		 * successful return.
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	rb_free_aux(rb);

err:
	ring_buffer_put_async(rb);
	handle->event = NULL;

	return NULL;
}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
			 bool truncated)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;
	u64 flags = 0;

	if (truncated)
		flags |= PERF_AUX_FLAG_TRUNCATED;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		local_set(&rb->aux_head, aux_head);
	} else {
		aux_head = local_read(&rb->aux_head);
		local_add(size, &rb->aux_head);
	}

	if (size || flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */

		perf_event_aux_event(handle->event, aux_head, size, flags);
	}

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
	}
	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	rb_free_aux(rb);
	ring_buffer_put_async(rb);
}
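
/*
 * A minimal sketch of how a pmu driver is expected to use the AUX handle
 * around a hardware capture. hw_start_trace()/hw_stop_trace() and
 * bytes_written are illustrative stand-ins for driver-specific code:
 *
 *	struct perf_output_handle handle;
 *	void *buf;
 *
 *	buf = perf_aux_output_begin(&handle, event);
 *	if (!buf)
 *		return;				// no space, or a nested writer
 *	hw_start_trace(buf, handle.size);	// driver provides ordering (B)
 *	...
 *	hw_stop_trace();
 *	perf_aux_output_end(&handle, bytes_written, false);
 */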

/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	if (size > handle->size)
		return -ENOSPC;

	local_add(size, &rb->aux_head);

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
		handle->wakeup = local_read(&rb->aux_wakeup) +
				 rb->aux_watermark;
	}

	handle->head = aux_head;
	handle->size -= size;

	return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}

#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
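
/*
 * For example (an illustrative scenario, not taken from this file): an AUX
 * allocation of 16 pages for a PERF_PMU_CAP_AUX_NO_SG pmu first asks for a
 * single order-4 block; under fragmentation the loop above retries with
 * order 3, 2, ... down to single pages, and page_private() on the first page
 * of each chunk tells rb_alloc_aux() (and ultimately the pmu driver) how
 * large that chunk actually is.
 */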

static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -ENOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * PMU requests more than one contiguous chunk of memory
		 * for SW double buffering
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		rb_free_aux(rb);

	return ret;
}

static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}

void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		irq_work_queue(&rb->irq_work);
}

static void rb_irq_work(struct irq_work *work)
{
	struct ring_buffer *rb = container_of(work, struct ring_buffer, irq_work);

	if (!atomic_read(&rb->aux_refcount))
		__rb_free_aux(rb);

	if (rb->rcu_head.next == (void *)rb)
		call_rcu(&rb->rcu_head, rb_free_rcu);
}
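
/*
 * Both tear-down paths above are deferred: rb_free_aux() only drops
 * aux_refcount and queues rb->irq_work, while ring_buffer_put_async() tags
 * rcu_head.next with the ring_buffer itself before queueing the same
 * irq_work. rb_irq_work() then runs in a context where it is safe to free
 * the AUX pages and, if the buffer itself was released, hands the final
 * free to RCU via rb_free_rcu().
 */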

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	rb->page_order = ilog2(nr_pages);
	rb->nr_pages = !!nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif

struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff)
			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
	}

	return __perf_mmap_to_page(rb, pgoff);
}
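
/*
 * For reference, the mmap() layout these lookups implement (offsets are in
 * pages; the exact extent of the data area depends on whether the buffer is
 * backed by vmalloc()):
 *
 *	pgoff 0				user_page (struct perf_event_mmap_page)
 *	pgoff 1 .. data pages		data_pages[] (sampling ring buffer)
 *	pgoff aux_pgoff onwards		aux_pages[]  (AUX area, if allocated)
 */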