/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>

#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->rb->poll, POLL_IN);

        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
}
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;

        preempt_disable();
        local_inc(&rb->nest);
        handle->wakeup = local_read(&rb->wakeup);
}
static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long head;

again:
        head = local_read(&rb->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&rb->nest))
                goto out;
        /*
         * Since the mmap() consumer (userspace) can run on a different CPU:
         *
         *   kernel                             user
         *
         *   if (LOAD ->data_tail) {            LOAD ->data_head
         *                      (A)             smp_rmb()       (C)
         *      STORE $data                     LOAD $data
         *      smp_wmb()       (B)             smp_mb()        (D)
         *      STORE ->data_head               STORE ->data_tail
         *   }
         *
         * Where A pairs with D, and B pairs with C.
         *
         * In our case (A) is a control dependency that separates the load of
         * the ->data_tail and the stores of $data. In case ->data_tail
         * indicates there is no room in the buffer to store $data we do not.
         *
         * D needs to be a full barrier since it separates the data READ
         * from the tail WRITE.
         *
         * For B a WMB is sufficient since it separates two WRITEs, and for C
         * an RMB is sufficient since it separates two READs.
         *
         * See perf_output_begin().
         */
        smp_wmb(); /* B, matches C */
        rb->user_page->data_head = head;
        /*
         * Now check if we missed an update -- rely on previous implied
         * compiler barriers to force a re-read.
         */
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&rb->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}
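/*
 * Illustrative sketch, not part of this file: the matching consumer side
 * of the ordering documented above, as a userspace reader of the mmap()ed
 * buffer might implement it with GCC/Clang builtins standing in for the
 * barriers. An acquire load plays the role of LOAD ->data_head + (C), and
 * a release store plays the role of (D) + STORE ->data_tail (the comment
 * above conservatively asks for a full smp_mb() there). process_record()
 * and `mask' are hypothetical, and record-wrap handling is elided.
 *
 *      static void consume(struct perf_event_mmap_page *pc, char *data,
 *                          __u64 mask)
 *      {
 *              __u64 head = __atomic_load_n(&pc->data_head, __ATOMIC_ACQUIRE);
 *              __u64 tail = pc->data_tail;
 *
 *              while (tail != head) {
 *                      struct perf_event_header *hdr;
 *
 *                      hdr = (void *)(data + (tail & mask));
 *                      process_record(hdr);                    // LOAD $data
 *                      tail += hdr->size;
 *              }
 *
 *              __atomic_store_n(&pc->data_tail, tail, __ATOMIC_RELEASE);
 *      }
 */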
int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size)
{
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost, page_shift;
        struct {
                struct perf_event_header header;
                u64                      id;
                u64                      lost;
        } lost_event;

        rcu_read_lock();
        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;
        rb = rcu_dereference(event->rb);
        if (unlikely(!rb))
                goto out;

        if (unlikely(!rb->nr_pages))
                goto out;
        handle->rb    = rb;
        handle->event = event;
        have_lost = local_read(&rb->lost);
        if (unlikely(have_lost)) {
                size += sizeof(lost_event);
                if (event->attr.sample_id_all)
                        size += event->id_header_size;
        }
        perf_output_get_handle(handle);

        do {
                tail = ACCESS_ONCE(rb->user_page->data_tail);
                offset = head = local_read(&rb->head);
                if (!rb->overwrite &&
                    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
                        goto fail;
                /*
                 * The above forms a control dependency barrier separating
                 * the @tail load above from the data stores below, since
                 * the @tail load is required to compute the branch to
                 * fail below.
                 *
                 * A, matches D; the full memory barrier userspace SHOULD
                 * issue after reading the data and before storing the new
                 * tail position.
                 *
                 * See perf_output_put_handle().
                 */

                head += size;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);
        /*
         * We rely on the implied barrier() by local_cmpxchg() to ensure
         * none of the data stores below can be lifted up by the compiler.
         */
        if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
                local_add(rb->watermark, &rb->wakeup);
        page_shift = PAGE_SHIFT + page_order(rb);
        handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
        offset &= (1UL << page_shift) - 1;
        handle->addr = rb->data_pages[handle->page] + offset;
        handle->size = (1UL << page_shift) - offset;
        if (unlikely(have_lost)) {
                struct perf_sample_data sample_data;

                lost_event.header.size = sizeof(lost_event);
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&rb->lost, 0);

                perf_event_header__init_id(&lost_event.header,
                                           &sample_data, event);
                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&rb->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}
unsigned int perf_output_copy(struct perf_output_handle *handle,
                              const void *buf, unsigned int len)
{
        return __output_copy(handle, buf, len);
}
unsigned int perf_output_skip(struct perf_output_handle *handle,
                              unsigned int len)
{
        return __output_skip(handle, NULL, len);
}
void perf_output_end(struct perf_output_handle *handle)
{
        perf_output_put_handle(handle);
        rcu_read_unlock();
}
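/*
 * Illustrative sketch, not part of this file: how a writer typically
 * drives the three calls above. perf_output_begin() reserves @size bytes
 * (emitting a PERF_RECORD_LOST first if needed), perf_output_put() copies
 * the record into the reservation, and perf_output_end() publishes
 * ->data_head via perf_output_put_handle(). The record layout here is
 * made up for the example.
 *
 *      struct perf_output_handle handle;
 *      struct {
 *              struct perf_event_header header;
 *              u64                      value;
 *      } rec = { ... };
 *
 *      if (perf_output_begin(&handle, event, rec.header.size))
 *              return;         // no space; ->lost was bumped for us
 *      perf_output_put(&handle, rec);
 *      perf_output_end(&handle);
 */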
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
        long max_size = perf_data_size(rb);

        if (watermark)
                rb->watermark = min(max_size, watermark);

        if (!rb->watermark)
                rb->watermark = max_size / 2;

        if (flags & RING_BUFFER_WRITABLE)
                rb->overwrite = 0;
        else
                rb->overwrite = 1;

        atomic_set(&rb->refcount, 1);

        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);
}
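/*
 * Illustrative note on the watermark defaulting above: with 16 data pages
 * of 4KiB each, perf_data_size() is 64KiB, so a caller passing
 * watermark == 0 ends up with rb->watermark = 32KiB, and the wakeup logic
 * in perf_output_begin() arms a wakeup roughly every half buffer.
 */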
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > rb->nr_pages)
                return NULL;

        if (pgoff == 0)
                return virt_to_page(rb->user_page);

        return virt_to_page(rb->data_pages[pgoff - 1]);
}
static void *perf_mmap_alloc_page(int cpu)
{
        struct page *page;
        int node;

        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;

        return page_address(page);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        int i;

        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        rb->user_page = perf_mmap_alloc_page(cpu);
        if (!rb->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }

        rb->nr_pages = nr_pages;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);

        free_page((unsigned long)rb->user_page);

fail_user_page:
        kfree(rb);

fail:
        return NULL;
}
static void perf_mmap_free_page(unsigned long addr)
{
        struct page *page = virt_to_page((void *)addr);

        page->mapping = NULL;
        __free_page(page);
}
void rb_free(struct ring_buffer *rb)
{
        int i;

        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
        kfree(rb);
}

#else

/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */
static int data_page_nr(struct ring_buffer *rb)
{
        return rb->nr_pages << page_order(rb);
}
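/*
 * Illustrative note: in this vmalloc configuration rb->nr_pages is at most
 * 1 (see rb_alloc() below), and page_order(rb) carries the real size. For
 * a buffer built from 4 pages, rb->nr_pages == 1 and page_order(rb) == 2,
 * so data_page_nr() returns 1 << 2 == 4 actual pages.
 */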
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        /* The '>' counts in the user page. */
        if (pgoff > data_page_nr(rb))
                return NULL;

        return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
static void perf_mmap_unmark_page(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        page->mapping = NULL;
}
static void rb_free_work(struct work_struct *work)
{
        struct ring_buffer *rb;
        void *base;
        int i, nr;

        rb = container_of(work, struct ring_buffer, work);
        nr = data_page_nr(rb);

        base = rb->user_page;
        /* The '<=' counts in the user page. */
        for (i = 0; i <= nr; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));

        vfree(base);
        kfree(rb);
}
void rb_free(struct ring_buffer *rb)
{
        schedule_work(&rb->work);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        void *all_buf;

        size = sizeof(struct ring_buffer);
        size += sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        INIT_WORK(&rb->work, rb_free_work);

        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;
        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
        rb->page_order = ilog2(nr_pages);
        rb->nr_pages = !!nr_pages;
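        /*
         * Illustrative note on the layout built above: for a request of
         * nr_pages == 4 the single vmalloc_user() area is 5 pages, where
         * page 0 is the user control page and pages 1-4 are the data area.
         * With page_order == ilog2(4) == 2 and nr_pages forced to 1, the
         * rest of the code treats the data area as one "page" of order 2,
         * so the (rb->nr_pages - 1) mask in perf_output_begin() never
         * wraps handle->page.
         */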
        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_all_buf:
        kfree(rb);

fail:
        return NULL;
}

#endif