#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE            0x01

struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             overwrite;      /* can overwrite itself */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */
        /* poll crap */
        spinlock_t                      event_lock;
        struct list_head                event_list;

        atomic_t                        mmap_count;
        unsigned long                   mmap_locked;
        struct user_struct              *mmap_user;

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};
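
/*
 * Layout note (added for clarity, not part of the original header):
 * data_pages[] is a flexible trailing array, so the buffer and its page
 * pointers are one allocation. A minimal sizing sketch, assuming the
 * page-backed rb_alloc() path in ring_buffer.c:
 *
 *      size = sizeof(struct ring_buffer) + nr_pages * sizeof(void *);
 *      rb = kzalloc(size, GFP_KERNEL);
 */
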
extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
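
/*
 * Minimal usage sketch (added; illustrative only — perf_mmap() in core.c
 * is the real call site and its error handling is more involved):
 *
 *      struct ring_buffer *rb;
 *
 *      rb = rb_alloc(nr_pages, event->attr.wakeup_watermark,
 *                    event->cpu, flags);
 *      if (!rb)
 *              return -ENOMEM;
 */
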
extern void perf_event_wakeup(struct perf_event *event);

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
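
/*
 * Note (added): in the page-backed case, pgoff 0 is expected to resolve to
 * user_page (the struct perf_event_mmap_page control page) and pgoff >= 1
 * to data_pages[pgoff - 1].
 */
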
#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
        return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
        return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}
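
/*
 * Worked example (added): with nr_pages == 8, PAGE_SHIFT == 12 and a
 * page-backed buffer (page_order(rb) == 0), perf_data_size() returns
 * 8 << 12 == 32768 bytes of data area; the user control page is extra.
 */
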
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                      \
static inline unsigned int                                              \
func_name(struct perf_output_handle *handle,                            \
          const void *buf, unsigned int len)                            \
{                                                                       \
        unsigned long size, written;                                    \
                                                                        \
        do {                                                            \
                size = min_t(unsigned long, handle->size, len);         \
                                                                        \
                written = memcpy_func(handle->addr, buf, size);         \
                                                                        \
                len -= written;                                         \
                handle->addr += written;                                \
                buf += written;                                         \
                handle->size -= written;                                \
                if (!handle->size) {                                    \
                        struct ring_buffer *rb = handle->rb;            \
                                                                        \
                        handle->page++;                                 \
                        handle->page &= rb->nr_pages - 1;               \
                        handle->addr = rb->data_pages[handle->page];    \
                        handle->size = PAGE_SIZE << page_order(rb);     \
                }                                                       \
        } while (len && written == size);                               \
                                                                        \
        return len;                                                     \
}
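
/*
 * Note (added): the generated loop assumes memcpy_func returns the number
 * of bytes it handled; it advances the handle across page boundaries and
 * returns the number of bytes left unwritten, so 0 means full success.
 */
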
static inline int memcpy_common(void *dst, const void *src, size_t n)
{
        memcpy(dst, src, n);
        return n;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

#define MEMCPY_SKIP(dst, src, n) (n)

DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user __copy_from_user_inatomic
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
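
/*
 * Hypothetical call site (added; perf_output_copy() in ring_buffer.c is
 * the real consumer of the generated __output_copy()):
 *
 *      unsigned int left = __output_copy_user(handle, ubuf, len);
 *      // nonzero 'left' means the user copy stopped early (e.g. a fault)
 */
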
/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
        int rctx;

        if (in_nmi())
                rctx = 3;
        else if (in_irq())
                rctx = 2;
        else if (in_softirq())
                rctx = 1;
        else
                rctx = 0;

        if (recursion[rctx])
                return -1;

        recursion[rctx]++;
        barrier();

        return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
        barrier();
        recursion[rctx]--;
}
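
/*
 * Minimal usage sketch (added; perf_swevent_get_recursion_context() in
 * core.c is the real caller, with a per-CPU recursion array):
 *
 *      int rctx = get_recursion_context(recursion);
 *      if (rctx < 0)
 *              return;         // already inside perf in this context
 *      ...
 *      put_recursion_context(recursion, rctx);
 */
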
#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
        return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
        return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */